Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* MTTCG fix for win32
* virtio-scsi assertion failure
* mem-prealloc coverity fix
* x86 migration revert which requires more thought
* x86 instruction limit (avoids >2 page translation blocks)
* nbd dead code cleanup
* small memory.c logic fix

# gpg: Signature made Mon 27 Mar 2017 17:03:04 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  scsi-generic: Fill in opt_xfer_len in INQUIRY reply if it is zero
  Revert "apic: save apic_delivered flag"
  nbd: drop unused NBDClientSession.is_unix field
  win32: replace custom mutex and condition variable with native primitives
  mem-prealloc: fix sysconf(_SC_NPROCESSORS_ONLN) failure case.
  tcg/i386: Check the size of instruction being translated
  virtio-scsi: Fix acquire/release in dataplane handlers
  virtio-scsi: Make virtio_scsi_acquire/release public
  clear pending status before calling memory commit

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit eb06c9e2d3
block/nbd-client.h

@@ -30,8 +30,6 @@ typedef struct NBDClientSession {
     Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
     NBDReply reply;
-
-    bool is_unix;
 } NBDClientSession;
 
 NBDClientSession *nbd_get_client_session(BlockDriverState *bs);
block/nbd.c

@@ -285,8 +285,6 @@ static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, Error **errp)
         goto done;
     }
 
-    s->client.is_unix = saddr->type == SOCKET_ADDRESS_KIND_UNIX;
-
 done:
     QDECREF(addr);
     qobject_decref(crumpled_addr);
hw/intc/apic_common.c

@@ -387,25 +387,6 @@ static bool apic_common_sipi_needed(void *opaque)
     return s->wait_for_sipi != 0;
 }
 
-static bool apic_irq_delivered_needed(void *opaque)
-{
-    APICCommonState *s = APIC_COMMON(opaque);
-    return s->cpu == X86_CPU(first_cpu) && apic_irq_delivered != 0;
-}
-
-static void apic_irq_delivered_pre_save(void *opaque)
-{
-    APICCommonState *s = APIC_COMMON(opaque);
-    s->apic_irq_delivered = apic_irq_delivered;
-}
-
-static int apic_irq_delivered_post_load(void *opaque, int version_id)
-{
-    APICCommonState *s = APIC_COMMON(opaque);
-    apic_irq_delivered = s->apic_irq_delivered;
-    return 0;
-}
-
 static const VMStateDescription vmstate_apic_common_sipi = {
     .name = "apic_sipi",
     .version_id = 1,
@@ -418,19 +399,6 @@ static const VMStateDescription vmstate_apic_common_sipi = {
     }
 };
 
-static const VMStateDescription vmstate_apic_irq_delivered = {
-    .name = "apic_irq_delivered",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = apic_irq_delivered_needed,
-    .pre_save = apic_irq_delivered_pre_save,
-    .post_load = apic_irq_delivered_post_load,
-    .fields = (VMStateField[]) {
-        VMSTATE_INT32(apic_irq_delivered, APICCommonState),
-        VMSTATE_END_OF_LIST()
-    }
-};
-
 static const VMStateDescription vmstate_apic_common = {
     .name = "apic",
     .version_id = 3,
@@ -465,7 +433,6 @@ static const VMStateDescription vmstate_apic_common = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_apic_common_sipi,
-        &vmstate_apic_irq_delivered,
         NULL
    }
 };
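This is the revert of "apic: save apic_delivered flag": the subsection being deleted migrates the static apic_irq_delivered only when its .needed callback returns true, with pre_save copying the static into a staging field and post_load restoring it; the pull request pulls it out because the migration-compatibility implications "require more thought". As a hedged, minimal restatement of that hook pattern (all names below are invented for illustration, this is not QEMU's VMState API):

    /* Sketch of the save/load hooks the revert removes. */
    static int irq_delivered;             /* the static being migrated */

    typedef struct DevState {
        int saved_irq_delivered;          /* staging field in the device */
    } DevState;

    static int subsection_needed(DevState *s)
    {
        return irq_delivered != 0;        /* only send when there is data */
    }

    static void pre_save(DevState *s)
    {
        s->saved_irq_delivered = irq_delivered;
    }

    static int post_load(DevState *s, int version_id)
    {
        irq_delivered = s->saved_irq_delivered;
        return 0;
    }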
hw/scsi/scsi-generic.c

@@ -237,9 +237,8 @@ static void scsi_read_complete(void * opaque, int ret)
         assert(max_transfer);
         stl_be_p(&r->buf[8], max_transfer);
         /* Also take care of the opt xfer len. */
-        if (ldl_be_p(&r->buf[12]) > max_transfer) {
-            stl_be_p(&r->buf[12], max_transfer);
-        }
+        stl_be_p(&r->buf[12],
+                 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
     }
     scsi_req_data(&r->req, len);
     scsi_req_unref(&r->req);
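The old code only clamped an oversized opt_xfer_len; the MIN_NON_ZERO form also fills the field in when the device reports zero, which is what the commit title promises. A sketch of the semantics; the macro below is written to match how QEMU's osdep.h defines it, which you should treat as an assumption here:

    #include <assert.h>
    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    /* Assumed definition: zero means "unset" and never wins. */
    #define MIN_NON_ZERO(a, b) \
        ((a) == 0 ? (b) : ((b) == 0 ? (a) : MIN(a, b)))

    int main(void)
    {
        uint32_t max_transfer = 1024;

        assert(MIN_NON_ZERO(max_transfer, 0u) == 1024);    /* zero: fill in  */
        assert(MIN_NON_ZERO(max_transfer, 4096u) == 1024); /* too big: clamp */
        assert(MIN_NON_ZERO(max_transfer, 512u) == 512);   /* smaller: keep  */
        return 0;
    }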
hw/scsi/virtio-scsi-dataplane.c

@@ -52,28 +52,40 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
 static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
                                               VirtQueue *vq)
 {
-    VirtIOSCSI *s = (VirtIOSCSI *)vdev;
+    bool progress;
+    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
+    virtio_scsi_acquire(s);
     assert(s->ctx && s->dataplane_started);
-    return virtio_scsi_handle_cmd_vq(s, vq);
+    progress = virtio_scsi_handle_cmd_vq(s, vq);
+    virtio_scsi_release(s);
+    return progress;
 }
 
 static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
                                                VirtQueue *vq)
 {
+    bool progress;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
+    virtio_scsi_acquire(s);
     assert(s->ctx && s->dataplane_started);
-    return virtio_scsi_handle_ctrl_vq(s, vq);
+    progress = virtio_scsi_handle_ctrl_vq(s, vq);
+    virtio_scsi_release(s);
+    return progress;
 }
 
 static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
                                                 VirtQueue *vq)
 {
+    bool progress;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
+    virtio_scsi_acquire(s);
     assert(s->ctx && s->dataplane_started);
-    return virtio_scsi_handle_event_vq(s, vq);
+    progress = virtio_scsi_handle_event_vq(s, vq);
+    virtio_scsi_release(s);
+    return progress;
 }
 
 static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
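All three handlers get the same treatment: take the context first, remember the helper's result in a local, release, then return. The bug being fixed is returning while the AioContext is still held. A generic sketch of that acquire/compute/release shape using plain pthreads (the struct and names are illustrative stand-ins, not QEMU API):

    #include <pthread.h>
    #include <stdbool.h>

    /* Illustrative stand-in for state guarded like an AioContext. */
    typedef struct {
        pthread_mutex_t lock;
        int pending;
    } Queue;

    static bool handle_queue(Queue *q)
    {
        bool progress;

        pthread_mutex_lock(&q->lock);    /* cf. virtio_scsi_acquire(s) */
        progress = q->pending > 0;       /* ...the actual work...      */
        if (progress) {
            q->pending--;
        }
        pthread_mutex_unlock(&q->lock);  /* cf. virtio_scsi_release(s) */
        return progress;                 /* never return while held    */
    }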
hw/scsi/virtio-scsi.c

@@ -422,31 +422,15 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
     }
 }
 
-static inline void virtio_scsi_acquire(VirtIOSCSI *s)
-{
-    if (s->ctx) {
-        aio_context_acquire(s->ctx);
-    }
-}
-
-static inline void virtio_scsi_release(VirtIOSCSI *s)
-{
-    if (s->ctx) {
-        aio_context_release(s->ctx);
-    }
-}
-
 bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     VirtIOSCSIReq *req;
     bool progress = false;
 
-    virtio_scsi_acquire(s);
     while ((req = virtio_scsi_pop_req(s, vq))) {
         progress = true;
         virtio_scsi_handle_ctrl_req(s, req);
     }
-    virtio_scsi_release(s);
     return progress;
 }
@@ -460,7 +444,9 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+    virtio_scsi_acquire(s);
     virtio_scsi_handle_ctrl_vq(s, vq);
+    virtio_scsi_release(s);
 }
 
 static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
@@ -604,7 +590,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
 
     QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
 
-    virtio_scsi_acquire(s);
     do {
         virtio_queue_set_notification(vq, 0);
@@ -632,7 +617,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
     QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
         virtio_scsi_handle_cmd_req_submit(s, req);
     }
-    virtio_scsi_release(s);
     return progress;
 }
@@ -647,7 +631,9 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+    virtio_scsi_acquire(s);
     virtio_scsi_handle_cmd_vq(s, vq);
+    virtio_scsi_release(s);
 }
 
 static void virtio_scsi_get_config(VirtIODevice *vdev,
@@ -723,12 +709,10 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
         return;
     }
 
-    virtio_scsi_acquire(s);
-
     req = virtio_scsi_pop_req(s, vs->event_vq);
     if (!req) {
         s->events_dropped = true;
-        goto out;
+        return;
     }
 
     if (s->events_dropped) {
@@ -738,7 +722,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
 
     if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
         virtio_scsi_bad_req(req);
-        goto out;
+        return;
     }
 
     evt = &req->resp.event;
@@ -758,19 +742,14 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
         evt->lun[3] = dev->lun & 0xFF;
     }
     virtio_scsi_complete_req(req);
-out:
-    virtio_scsi_release(s);
 }
 
 bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
-    virtio_scsi_acquire(s);
     if (s->events_dropped) {
         virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
-        virtio_scsi_release(s);
         return true;
     }
-    virtio_scsi_release(s);
     return false;
 }
@@ -784,7 +763,9 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+    virtio_scsi_acquire(s);
     virtio_scsi_handle_event_vq(s, vq);
+    virtio_scsi_release(s);
 }
 
 static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
@@ -794,8 +775,10 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
         dev->type != TYPE_ROM) {
+        virtio_scsi_acquire(s);
         virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                                sense.asc | (sense.ascq << 8));
+        virtio_scsi_release(s);
     }
 }
@@ -817,9 +800,11 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
+        virtio_scsi_acquire(s);
         virtio_scsi_push_event(s, sd,
                                VIRTIO_SCSI_T_TRANSPORT_RESET,
                                VIRTIO_SCSI_EVT_RESET_RESCAN);
+        virtio_scsi_release(s);
     }
 }
@@ -831,9 +816,11 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     SCSIDevice *sd = SCSI_DEVICE(dev);
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
+        virtio_scsi_acquire(s);
         virtio_scsi_push_event(s, sd,
                                VIRTIO_SCSI_T_TRANSPORT_RESET,
                                VIRTIO_SCSI_EVT_RESET_REMOVED);
+        virtio_scsi_release(s);
     }
 
     qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
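Taken together, these hunks move lock ownership outward: virtio_scsi_push_event and the *_vq helpers now assume the caller already holds the AioContext, so the goto out unlock paths become plain returns, and every call site (handle_ctrl/cmd/event, change, hotplug, hotunplug) brackets the call with virtio_scsi_acquire/release. A minimal sketch of that caller-holds-lock convention (hypothetical names, pthreads standing in for the AioContext):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        int pending;
    } Dev;

    /* Helper: caller must hold dev->lock, so an early exit is a plain
     * return with no unlock bookkeeping. */
    static void push_event(Dev *dev)
    {
        if (!dev->pending) {
            return;
        }
        dev->pending--;                    /* ...emit the event... */
    }

    /* Call site: acquire once at the boundary, release on the way out. */
    static void handle_event(Dev *dev)
    {
        pthread_mutex_lock(&dev->lock);
        push_event(dev);
        pthread_mutex_unlock(&dev->lock);
    }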
include/hw/i386/apic_internal.h

@@ -189,8 +189,6 @@ struct APICCommonState {
     DeviceState *vapic;
     hwaddr vapic_paddr; /* note: persistence via kvmvapic */
     bool legacy_instance_id;
-
-    int apic_irq_delivered; /* for saving static variable */
 };
 
 typedef struct VAPICState {
include/hw/virtio/virtio-scsi.h

@@ -121,6 +121,20 @@ typedef struct VirtIOSCSIReq {
     } req;
 } VirtIOSCSIReq;
 
+static inline void virtio_scsi_acquire(VirtIOSCSI *s)
+{
+    if (s->ctx) {
+        aio_context_acquire(s->ctx);
+    }
+}
+
+static inline void virtio_scsi_release(VirtIOSCSI *s)
+{
+    if (s->ctx) {
+        aio_context_release(s->ctx);
+    }
+}
+
 void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                                 VirtIOHandleOutput ctrl, VirtIOHandleOutput evt,
                                 VirtIOHandleOutput cmd);
include/qemu/thread-win32.h

@@ -4,8 +4,7 @@
 #include <windows.h>
 
 struct QemuMutex {
-    CRITICAL_SECTION lock;
-    LONG owner;
+    SRWLOCK lock;
 };
 
 typedef struct QemuRecMutex QemuRecMutex;
@@ -19,9 +18,7 @@ int qemu_rec_mutex_trylock(QemuRecMutex *mutex);
 void qemu_rec_mutex_unlock(QemuRecMutex *mutex);
 
 struct QemuCond {
-    LONG waiters, target;
-    HANDLE sema;
-    HANDLE continue_event;
+    CONDITION_VARIABLE var;
 };
 
 struct QemuSemaphore {
memory.c
@@ -906,12 +906,6 @@ void memory_region_transaction_begin(void)
     ++memory_region_transaction_depth;
 }
 
-static void memory_region_clear_pending(void)
-{
-    memory_region_update_pending = false;
-    ioeventfd_update_pending = false;
-}
-
 void memory_region_transaction_commit(void)
 {
     AddressSpace *as;
@@ -927,14 +921,14 @@ void memory_region_transaction_commit(void)
             QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                 address_space_update_topology(as);
             }
-
+            memory_region_update_pending = false;
             MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
         } else if (ioeventfd_update_pending) {
             QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                 address_space_update_ioeventfds(as);
             }
+            ioeventfd_update_pending = false;
         }
-        memory_region_clear_pending();
     }
 }
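The memory.c change is about reentrancy: a commit listener invoked by MEMORY_LISTENER_CALL_GLOBAL(commit, Forward) may itself begin and commit a nested transaction, and under the old code the pending flags were only cleared afterwards by memory_region_clear_pending(), so a nested commit could observe stale "work pending" state. Clearing each flag immediately before its callbacks fire avoids that. A minimal sketch of the pattern, under the stated assumption that the callback can re-enter:

    #include <stdbool.h>

    static bool update_pending;
    static void (*commit_cb)(void);   /* may re-enter transaction_commit() */

    static void transaction_commit(void)
    {
        if (update_pending) {
            /* Clear before the callback so a nested commit sees a
             * clean state instead of redoing the same work. */
            update_pending = false;
            commit_cb();
        }
    }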
target/i386/translate.c

@@ -4418,6 +4418,13 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
     s->vex_l = 0;
     s->vex_v = 0;
  next_byte:
+    /* x86 has an upper limit of 15 bytes for an instruction. Since we
+     * do not want to decode and generate IR for an illegal
+     * instruction, the following check limits the instruction size to
+     * 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
+    if (s->pc - pc_start > 14) {
+        goto illegal_op;
+    }
     b = cpu_ldub_code(env, s->pc);
     s->pc++;
     /* Collect prefixes. */
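The bound in the comment works out as follows: the check runs at next_byte, before each prefix or opcode byte is fetched, so it fires once prefixes alone exceed 14 bytes; the largest instruction that can still slip through is 14 prefix bytes + 1 opcode + 6 bytes of modrm/sib/displacement + 4 bytes of immediate = 25 bytes. That cap is what keeps a single translation block from spanning more than two guest pages. A stripped-down sketch of the same guard in a hypothetical byte-at-a-time decoder (not the TCG frontend itself):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PREFIX_BUDGET 14   /* x86 caps whole instructions at 15 bytes */

    static bool is_prefix(uint8_t b)
    {
        return b == 0x66 || b == 0x67 || b == 0xf0 ||
               b == 0xf2 || b == 0xf3;   /* a few examples, not exhaustive */
    }

    /* Returns false (i.e. "illegal_op") when prefixes exceed the budget. */
    static bool decode_one(const uint8_t *code, size_t len, size_t *pc)
    {
        size_t start = *pc;

        for (;;) {
            if (*pc - start > PREFIX_BUDGET || *pc >= len) {
                return false;           /* same role as goto illegal_op */
            }
            uint8_t b = code[(*pc)++];
            if (is_prefix(b)) {
                continue;               /* collect prefixes */
            }
            /* b is the opcode byte; real decoding would go on here. */
            return true;
        }
    }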
util/oslib-posix.c

@@ -55,7 +55,7 @@
 #include "qemu/error-report.h"
 #endif
 
-#define MAX_MEM_PREALLOC_THREAD_COUNT (MIN(sysconf(_SC_NPROCESSORS_ONLN), 16))
+#define MAX_MEM_PREALLOC_THREAD_COUNT 16
 
 struct MemsetThread {
     char *addr;
@@ -381,6 +381,18 @@ static void *do_touch_pages(void *arg)
     return NULL;
 }
 
+static inline int get_memset_num_threads(int smp_cpus)
+{
+    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
+    int ret = 1;
+
+    if (host_procs > 0) {
+        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
+    }
+    /* In case sysconf() fails, we fall back to single threaded */
+    return ret;
+}
+
 static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                             int smp_cpus)
 {
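The coverity complaint: sysconf(_SC_NPROCESSORS_ONLN) may return -1 on failure, and the old macro fed that straight into MIN(), yielding a negative thread count. The new helper checks the return value and degrades to a single thread. A compilable restatement of the same logic, mirroring the hunk above:

    #include <stdio.h>
    #include <unistd.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX_MEM_PREALLOC_THREAD_COUNT 16

    /* Same shape as get_memset_num_threads() in the diff. */
    static int num_threads(int smp_cpus)
    {
        long host_procs = sysconf(_SC_NPROCESSORS_ONLN);  /* may be -1 */
        int ret = 1;                                      /* safe fallback */

        if (host_procs > 0) {
            ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT),
                      smp_cpus);
        }
        return ret;
    }

    int main(void)
    {
        printf("prealloc threads for 8 vCPUs: %d\n", num_threads(8));
        return 0;
    }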
@@ -389,7 +401,7 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
     int i = 0;
 
     memset_thread_failed = false;
-    memset_num_threads = MIN(smp_cpus, MAX_MEM_PREALLOC_THREAD_COUNT);
+    memset_num_threads = get_memset_num_threads(smp_cpus);
     memset_thread = g_new0(MemsetThread, memset_num_threads);
     numpages_per_thread = (numpages / memset_num_threads);
     size_per_thread = (hpagesize * numpages_per_thread);
util/qemu-thread-win32.c

@@ -10,6 +10,11 @@
  * See the COPYING file in the top-level directory.
  *
  */
+
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0600
+#endif
+
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "qemu/thread.h"
@@ -39,44 +44,30 @@ static void error_exit(int err, const char *msg)
 
 void qemu_mutex_init(QemuMutex *mutex)
 {
-    mutex->owner = 0;
-    InitializeCriticalSection(&mutex->lock);
+    InitializeSRWLock(&mutex->lock);
 }
 
 void qemu_mutex_destroy(QemuMutex *mutex)
 {
-    assert(mutex->owner == 0);
-    DeleteCriticalSection(&mutex->lock);
+    InitializeSRWLock(&mutex->lock);
 }
 
 void qemu_mutex_lock(QemuMutex *mutex)
 {
-    EnterCriticalSection(&mutex->lock);
-
-    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
-     * using them as such.
-     */
-    assert(mutex->owner == 0);
-    mutex->owner = GetCurrentThreadId();
+    AcquireSRWLockExclusive(&mutex->lock);
 }
 
 int qemu_mutex_trylock(QemuMutex *mutex)
 {
     int owned;
 
-    owned = TryEnterCriticalSection(&mutex->lock);
-    if (owned) {
-        assert(mutex->owner == 0);
-        mutex->owner = GetCurrentThreadId();
-    }
+    owned = TryAcquireSRWLockExclusive(&mutex->lock);
     return !owned;
 }
 
 void qemu_mutex_unlock(QemuMutex *mutex)
 {
-    assert(mutex->owner == GetCurrentThreadId());
-    mutex->owner = 0;
-    LeaveCriticalSection(&mutex->lock);
+    ReleaseSRWLockExclusive(&mutex->lock);
 }
 
 void qemu_rec_mutex_init(QemuRecMutex *mutex)
@@ -107,124 +98,27 @@ void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
 void qemu_cond_init(QemuCond *cond)
 {
-    memset(cond, 0, sizeof(*cond));
-
-    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
-    if (!cond->sema) {
-        error_exit(GetLastError(), __func__);
-    }
-    cond->continue_event = CreateEvent(NULL,    /* security */
-                                       FALSE,   /* auto-reset */
-                                       FALSE,   /* not signaled */
-                                       NULL);   /* name */
-    if (!cond->continue_event) {
-        error_exit(GetLastError(), __func__);
-    }
+    InitializeConditionVariable(&cond->var);
 }
 
 void qemu_cond_destroy(QemuCond *cond)
 {
-    BOOL result;
-    result = CloseHandle(cond->continue_event);
-    if (!result) {
-        error_exit(GetLastError(), __func__);
-    }
-    cond->continue_event = 0;
-    result = CloseHandle(cond->sema);
-    if (!result) {
-        error_exit(GetLastError(), __func__);
-    }
-    cond->sema = 0;
+    InitializeConditionVariable(&cond->var);
 }
 
 void qemu_cond_signal(QemuCond *cond)
 {
-    DWORD result;
-
-    /*
-     * Signal only when there are waiters.  cond->waiters is
-     * incremented by pthread_cond_wait under the external lock,
-     * so we are safe about that.
-     */
-    if (cond->waiters == 0) {
-        return;
-    }
-
-    /*
-     * Waiting threads decrement it outside the external lock, but
-     * only if another thread is executing pthread_cond_broadcast and
-     * has the mutex.  So, it also cannot be decremented concurrently
-     * with this particular access.
-     */
-    cond->target = cond->waiters - 1;
-    result = SignalObjectAndWait(cond->sema, cond->continue_event,
-                                 INFINITE, FALSE);
-    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
-        error_exit(GetLastError(), __func__);
-    }
+    WakeConditionVariable(&cond->var);
 }
 
 void qemu_cond_broadcast(QemuCond *cond)
 {
-    BOOLEAN result;
-    /*
-     * As in pthread_cond_signal, access to cond->waiters and
-     * cond->target is locked via the external mutex.
-     */
-    if (cond->waiters == 0) {
-        return;
-    }
-
-    cond->target = 0;
-    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
-    if (!result) {
-        error_exit(GetLastError(), __func__);
-    }
-
-    /*
-     * At this point all waiters continue. Each one takes its
-     * slice of the semaphore. Now it's our turn to wait: Since
-     * the external mutex is held, no thread can leave cond_wait,
-     * yet. For this reason, we can be sure that no thread gets
-     * a chance to eat *more* than one slice. OTOH, it means
-     * that the last waiter must send us a wake-up.
-     */
-    WaitForSingleObject(cond->continue_event, INFINITE);
+    WakeAllConditionVariable(&cond->var);
 }
 
 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
 {
-    /*
-     * This access is protected under the mutex.
-     */
-    cond->waiters++;
-
-    /*
-     * Unlock external mutex and wait for signal.
-     * NOTE: we've held mutex locked long enough to increment
-     * waiters count above, so there's no problem with
-     * leaving mutex unlocked before we wait on semaphore.
-     */
-    qemu_mutex_unlock(mutex);
-    WaitForSingleObject(cond->sema, INFINITE);
-
-    /* Now waiters must rendez-vous with the signaling thread and
-     * let it continue.  For cond_broadcast this has heavy contention
-     * and triggers thundering herd.  So goes life.
-     *
-     * Decrease waiters count.  The mutex is not taken, so we have
-     * to do this atomically.
-     *
-     * All waiters contend for the mutex at the end of this function
-     * until the signaling thread relinquishes it.  To ensure
-     * each waiter consumes exactly one slice of the semaphore,
-     * the signaling thread stops until it is told by the last
-     * waiter that it can go on.
-     */
-    if (InterlockedDecrement(&cond->waiters) == cond->target) {
-        SetEvent(cond->continue_event);
-    }
-
-    qemu_mutex_lock(mutex);
+    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
 }
 
 void qemu_sem_init(QemuSemaphore *sem, int init)
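With _WIN32_WINNT raised to 0x0600, the hand-rolled semaphore-and-event machinery collapses into Vista's slim native primitives: SRWLOCK for the mutex and CONDITION_VARIABLE for the condition variable, with SleepConditionVariableSRW atomically dropping the lock while the thread sleeps. A self-contained sketch of the same pairing in plain Win32, without the QEMU wrappers:

    #ifndef _WIN32_WINNT
    #define _WIN32_WINNT 0x0600  /* SRWLOCK/CONDITION_VARIABLE need Vista+ */
    #endif
    #include <windows.h>
    #include <stdio.h>

    static SRWLOCK lock = SRWLOCK_INIT;
    static CONDITION_VARIABLE cond = CONDITION_VARIABLE_INIT;
    static int ready;

    static DWORD WINAPI producer(LPVOID arg)
    {
        (void)arg;
        AcquireSRWLockExclusive(&lock);
        ready = 1;
        ReleaseSRWLockExclusive(&lock);
        WakeConditionVariable(&cond);      /* cf. qemu_cond_signal() */
        return 0;
    }

    int main(void)
    {
        HANDLE t = CreateThread(NULL, 0, producer, NULL, 0, NULL);

        AcquireSRWLockExclusive(&lock);
        while (!ready) {
            /* Atomically releases the lock and waits, cf. qemu_cond_wait(). */
            SleepConditionVariableSRW(&cond, &lock, INFINITE, 0);
        }
        ReleaseSRWLockExclusive(&lock);

        WaitForSingleObject(t, INFINITE);
        CloseHandle(t);
        puts("woken");
        return 0;
    }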