mirror of https://github.com/xemu-project/xemu.git
-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJYtFTTAAoJEJykq7OBq3PI93wH/RkY2roxr0WB+uJu/cWnMPFX
kuTsx8xJOImRJWuVaHMeMewbF5P3KoppDpn6V2MiJTCBgLO3sYWpN0zd9w3WyOcd
qOA+WjPxJvj4ttkiIAJEzcka7aStyCKSDNFZeMQi0mJRjjclrfaH84ICGEvDmPEO
qeqA714jnsDewwUtTy6fUOdqVZXTXdVWv+kR0er2ovNHbmEzc8uDaw624PBOZgjN
eg3Tny9z77hP3QTlqmhKr9I2gvWDj1fyBd0Bra9dByuc8vsTTehYX6qIbjRvWKsU
bIdTQOM1NVjUo5QI8fpk9X4BNWU6b5rjQ6dYEfgN3fFHF33QKwanmyWX0ivDJ5s=
=Pvt5
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Mon 27 Feb 2017 16:33:23 GMT
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  tests-aio-multithread: use atomic_read properly
  iscsi: do not use aio_context_acquire/release
  nfs: do not use aio_context_acquire/release
  curl: do not use aio_context_acquire/release

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
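
The change common to all three drivers is easiest to see outside of QEMU. The sketch below is plain C11 with pthreads, not QEMU code (pthread_mutex_t stands in for QemuMutex, and the names are made up); it shows the pattern this pull request introduces: each driver guards its own state with a fine-grained mutex instead of acquiring the whole AioContext, and it drops that mutex around completion callbacks so a callback that re-enters the driver cannot deadlock.

/* Standalone sketch (plain C11 + pthreads, not QEMU code) of the locking
 * pattern these patches introduce: a per-driver mutex protects driver state,
 * and the mutex is dropped around completion callbacks. */
#include <pthread.h>
#include <stdio.h>

typedef struct Driver {
    pthread_mutex_t mutex;   /* stands in for QemuMutex in BDRVCURLState etc. */
    int in_flight;           /* example of state the mutex protects */
} Driver;

typedef void CompletionCB(void *opaque, int ret);

/* Called with d->mutex held. */
static void complete_one(Driver *d, CompletionCB *cb, void *opaque, int ret)
{
    d->in_flight--;
    /* Drop the lock around the callback, much as curl_multi_check_completion
     * now does around acb->common.cb(). */
    pthread_mutex_unlock(&d->mutex);
    cb(opaque, ret);
    pthread_mutex_lock(&d->mutex);
}

static void my_cb(void *opaque, int ret)
{
    printf("request %s done, ret=%d\n", (const char *)opaque, ret);
}

int main(void)
{
    Driver d = { .in_flight = 1 };
    pthread_mutex_init(&d.mutex, NULL);

    pthread_mutex_lock(&d.mutex);          /* was aio_context_acquire() */
    complete_one(&d, my_cb, "req0", 0);
    pthread_mutex_unlock(&d.mutex);        /* was aio_context_release() */

    pthread_mutex_destroy(&d.mutex);
    return 0;
}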
commit 6181478f63

block/curl.c | 24
@@ -135,6 +135,7 @@ typedef struct BDRVCURLState {
     char *cookie;
     bool accept_range;
     AioContext *aio_context;
+    QemuMutex mutex;
     char *username;
     char *password;
     char *proxyusername;
@@ -333,6 +334,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     return FIND_RET_NONE;
 }
 
+/* Called with s->mutex held. */
 static void curl_multi_check_completion(BDRVCURLState *s)
 {
     int msgs_in_queue;
@@ -374,7 +376,9 @@ static void curl_multi_check_completion(BDRVCURLState *s)
                         continue;
                     }
 
+                    qemu_mutex_unlock(&s->mutex);
                     acb->common.cb(acb->common.opaque, -EPROTO);
+                    qemu_mutex_lock(&s->mutex);
                     qemu_aio_unref(acb);
                     state->acb[i] = NULL;
                 }
@@ -386,6 +390,7 @@ static void curl_multi_check_completion(BDRVCURLState *s)
     }
 }
 
+/* Called with s->mutex held. */
 static void curl_multi_do_locked(CURLState *s)
 {
     CURLSocket *socket, *next_socket;
@@ -409,19 +414,19 @@ static void curl_multi_do(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_read(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
     curl_multi_check_completion(s->s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_timeout_do(void *arg)
@@ -434,11 +439,11 @@ static void curl_multi_timeout_do(void *arg)
         return;
     }
 
-    aio_context_acquire(s->aio_context);
+    qemu_mutex_lock(&s->mutex);
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
     curl_multi_check_completion(s);
-    aio_context_release(s->aio_context);
+    qemu_mutex_unlock(&s->mutex);
 #else
     abort();
 #endif
@@ -771,6 +776,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     curl_easy_cleanup(state->curl);
     state->curl = NULL;
 
+    qemu_mutex_init(&s->mutex);
     curl_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     qemu_opts_del(opts);
@@ -801,12 +807,11 @@ static void curl_readv_bh_cb(void *p)
     CURLAIOCB *acb = p;
     BlockDriverState *bs = acb->common.bs;
     BDRVCURLState *s = bs->opaque;
-    AioContext *ctx = bdrv_get_aio_context(bs);
 
     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;
 
-    aio_context_acquire(ctx);
+    qemu_mutex_lock(&s->mutex);
 
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
@@ -854,7 +859,7 @@ static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
 out:
-    aio_context_release(ctx);
+    qemu_mutex_unlock(&s->mutex);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
@@ -883,6 +888,7 @@ static void curl_close(BlockDriverState *bs)
 
     DPRINTF("CURL: Close\n");
     curl_detach_aio_context(bs);
+    qemu_mutex_destroy(&s->mutex);
 
     g_free(s->cookie);
     g_free(s->url);
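
The curl hunks above also show the lock's lifecycle: the mutex is created in curl_open() and destroyed in curl_close(), and helpers that must run under it carry a "Called with s->mutex held" comment (and, for curl_multi_do_locked(), a _locked suffix). Below is a standalone illustration of that convention, again plain C with pthreads and hypothetical names rather than QEMU code.

/* Standalone illustration (plain C + pthreads, hypothetical names) of the
 * convention the curl hunks follow: the mutex lives in the driver state,
 * is created in open() and destroyed in close(), and helpers that expect
 * the lock to be held say so in a comment and get a _locked suffix. */
#include <pthread.h>

typedef struct State {
    pthread_mutex_t mutex;      /* plays the role of BDRVCURLState.mutex */
    int sockets_in_use;         /* state that must only be touched locked */
} State;

/* Called with s->mutex held. */
static void do_work_locked(State *s)
{
    s->sockets_in_use++;
}

static void event_handler(State *s)          /* cf. curl_multi_do() */
{
    pthread_mutex_lock(&s->mutex);
    do_work_locked(s);
    pthread_mutex_unlock(&s->mutex);
}

static int driver_open(State *s)             /* cf. curl_open() */
{
    s->sockets_in_use = 0;
    return pthread_mutex_init(&s->mutex, NULL);
}

static void driver_close(State *s)           /* cf. curl_close() */
{
    pthread_mutex_destroy(&s->mutex);
}

int main(void)
{
    State s;
    if (driver_open(&s) != 0) {
        return 1;
    }
    event_handler(&s);
    driver_close(&s);
    return 0;
}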

block/iscsi.c

@@ -58,6 +58,7 @@ typedef struct IscsiLun {
     int events;
     QEMUTimer *nop_timer;
     QEMUTimer *event_timer;
+    QemuMutex mutex;
     struct scsi_inquiry_logical_block_provisioning lbp;
     struct scsi_inquiry_block_limits bl;
     unsigned char *zeroblock;
@@ -252,6 +253,7 @@ static int iscsi_translate_sense(struct scsi_sense *sense)
     return ret;
 }
 
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                     void *command_data, void *opaque)
@@ -352,6 +354,7 @@ static const AIOCBInfo iscsi_aiocb_info = {
 static void iscsi_process_read(void *arg);
 static void iscsi_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void
 iscsi_set_events(IscsiLun *iscsilun)
 {
@@ -395,10 +398,10 @@ iscsi_process_read(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLIN);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void
@@ -407,10 +410,10 @@ iscsi_process_write(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLOUT);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
@@ -589,6 +592,7 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     uint64_t lba;
     uint32_t num_sectors;
     bool fua = flags & BDRV_REQ_FUA;
+    int r = 0;
 
     if (fua) {
         assert(iscsilun->dpofua);
@@ -604,6 +608,7 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     lba = sector_qemu2lun(sector_num, iscsilun);
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -640,7 +645,9 @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -655,12 +662,15 @@ retry:
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
@@ -693,18 +703,21 @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
         goto out;
     }
 
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                   sector_qemu2lun(sector_num, iscsilun),
                                   8 + 16, iscsi_co_generic_cb,
                                   &iTask) == NULL) {
         ret = -ENOMEM;
-        goto out;
+        goto out_unlock;
     }
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.do_retry) {
@@ -721,20 +734,20 @@ retry:
          * because the device is busy or the cmd is not
         * supported) we pretend all blocks are allocated
         * for backwards compatibility */
-        goto out;
+        goto out_unlock;
     }
 
     lbas = scsi_datain_unmarshall(iTask.task);
     if (lbas == NULL) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }
 
     lbasd = &lbas->descriptors[0];
 
     if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }
 
     *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);
@@ -756,6 +769,8 @@ retry:
     if (*pnum > nb_sectors) {
         *pnum = nb_sectors;
     }
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
 out:
     if (iTask.task != NULL) {
         scsi_free_scsi_task(iTask.task);
@@ -818,6 +833,7 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -855,7 +871,9 @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -867,6 +885,7 @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -881,6 +900,7 @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
     struct IscsiTask iTask;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
                                       0, iscsi_co_generic_cb, &iTask) == NULL) {
@@ -889,7 +909,9 @@ retry:
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -901,6 +923,7 @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -910,6 +933,7 @@ retry:
 }
 
 #ifdef __linux__
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
@@ -1034,6 +1058,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     acb->task->expxferlen = acb->ioh->dxfer_len;
 
     data.size = 0;
+    qemu_mutex_lock(&iscsilun->mutex);
     if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
         if (acb->ioh->iovec_count == 0) {
             data.data = acb->ioh->dxferp;
@@ -1049,6 +1074,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
                                  iscsi_aio_ioctl_cb,
                                  (data.size > 0) ? &data : NULL,
                                  acb) != 0) {
+        qemu_mutex_unlock(&iscsilun->mutex);
         scsi_free_scsi_task(acb->task);
         qemu_aio_unref(acb);
         return NULL;
@@ -1068,6 +1094,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     }
 
     iscsi_set_events(iscsilun);
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     return &acb->common;
 }
@@ -1092,6 +1119,7 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     IscsiLun *iscsilun = bs->opaque;
     struct IscsiTask iTask;
     struct unmap_list list;
+    int r = 0;
 
     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -1106,15 +1134,19 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     list.num = count / iscsilun->block_size;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                          iscsi_co_generic_cb, &iTask) == NULL) {
-        return -ENOMEM;
+        r = -ENOMEM;
+        goto out_unlock;
     }
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -1131,17 +1163,20 @@ retry:
         /* the target might fail with a check condition if it
            is not happy with the alignment of the UNMAP request
           we silently fail in this case */
-        return 0;
+        goto out_unlock;
     }
 
     if (iTask.status != SCSI_STATUS_GOOD) {
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                count >> BDRV_SECTOR_BITS);
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 static int
@@ -1153,6 +1188,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
     uint64_t lba;
     uint32_t nb_blocks;
     bool use_16_for_ws = iscsilun->use_16_for_rw;
+    int r = 0;
 
     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -1186,6 +1222,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
         }
     }
 
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
 retry:
     if (use_16_for_ws) {
@@ -1205,7 +1242,9 @@ retry:
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
@@ -1215,7 +1254,8 @@ retry:
         /* WRITE SAME is not supported by the target */
         iscsilun->has_write_same = false;
         scsi_free_scsi_task(iTask.task);
-        return -ENOTSUP;
+        r = -ENOTSUP;
+        goto out_unlock;
     }
 
     if (iTask.task != NULL) {
@@ -1231,7 +1271,8 @@ retry:
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                    count >> BDRV_SECTOR_BITS);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     if (flags & BDRV_REQ_MAY_UNMAP) {
@@ -1242,7 +1283,9 @@ retry:
                                       count >> BDRV_SECTOR_BITS);
     }
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 static void apply_chap(struct iscsi_context *iscsi, QemuOpts *opts,
@@ -1331,7 +1374,7 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
@@ -1344,7 +1387,7 @@ static void iscsi_nop_timed_event(void *opaque)
     iscsi_set_events(iscsilun);
 
 out:
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
@@ -1890,6 +1933,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
     scsi_free_scsi_task(task);
     task = NULL;
 
+    qemu_mutex_init(&iscsilun->mutex);
     iscsi_attach_aio_context(bs, iscsilun->aio_context);
 
     /* Guess the internal cluster (page) size of the iscsi target by the means
@@ -1935,6 +1979,7 @@ static void iscsi_close(BlockDriverState *bs)
     iscsi_destroy_context(iscsi);
     g_free(iscsilun->zeroblock);
     iscsi_allocmap_free(iscsilun);
+    qemu_mutex_destroy(&iscsilun->mutex);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
 
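
A second pattern in the iscsi hunks is the wait loop: a coroutine must not sleep with the mutex held, so it unlocks before qemu_coroutine_yield(), re-locks afterwards, and routes every error path through out_unlock. The standalone analogue below is not QEMU code; it uses a condition variable, which performs the same unlock/wait/re-lock internally, and all names are invented.

/* Standalone analogue (C + pthreads, hypothetical names) of the wait loop in
 * the iscsi hunks.  In QEMU the coroutine explicitly unlocks, yields and
 * re-locks around qemu_coroutine_yield(); pthread_cond_wait() does the same
 * dance internally.  Either way, the driver mutex is never held while the
 * request is waiting to complete. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

typedef struct Task {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int complete;
    int err_code;
} Task;

static void *completion_thread(void *opaque)     /* cf. iscsi_co_generic_cb */
{
    Task *t = opaque;
    usleep(1000);                                 /* pretend I/O happens here */
    pthread_mutex_lock(&t->mutex);
    t->complete = 1;
    t->err_code = 0;
    pthread_cond_signal(&t->cond);
    pthread_mutex_unlock(&t->mutex);
    return NULL;
}

int main(void)
{
    Task t = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };
    pthread_t tid;
    int r;

    pthread_mutex_lock(&t.mutex);                 /* cf. qemu_mutex_lock() */
    pthread_create(&tid, NULL, completion_thread, &t);
    while (!t.complete) {                         /* cf. while (!iTask.complete) */
        pthread_cond_wait(&t.cond, &t.mutex);     /* drops the lock while waiting */
    }
    r = t.err_code;
    pthread_mutex_unlock(&t.mutex);               /* cf. out_unlock: */
    pthread_join(tid, NULL);

    printf("request finished, r=%d\n", r);
    return r;
}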

block/nfs.c | 23
@@ -54,6 +54,7 @@ typedef struct NFSClient {
     int events;
     bool has_zero_init;
     AioContext *aio_context;
+    QemuMutex mutex;
     blkcnt_t st_blocks;
     bool cache_used;
     NFSServer *server;
@@ -191,6 +192,7 @@ static void nfs_parse_filename(const char *filename, QDict *options,
 static void nfs_process_read(void *arg);
 static void nfs_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
@@ -209,20 +211,20 @@ static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
@@ -242,6 +244,7 @@ static void nfs_co_generic_bh_cb(void *opaque)
     aio_co_wake(task->co);
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                   void *private_data)
@@ -273,12 +276,15 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pread_async(client->context, client->fh,
                         offset, bytes, nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -317,9 +323,11 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pwrite_async(client->context, client->fh,
                          offset, bytes, buf,
                          nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         if (my_buffer) {
             g_free(buf);
         }
@@ -327,6 +335,7 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -349,12 +358,15 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -440,6 +452,7 @@ static void nfs_file_close(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
     nfs_client_close(client);
+    qemu_mutex_destroy(&client->mutex);
 }
 
 static NFSServer *nfs_config(QDict *options, Error **errp)
@@ -647,6 +660,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
     if (ret < 0) {
         return ret;
     }
+    qemu_mutex_init(&client->mutex);
     bs->total_sectors = ret;
     ret = 0;
     return ret;
@@ -702,6 +716,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                                void *private_data)
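
The nfs hunks follow the same submission discipline: take the mutex, queue the asynchronous request, and make sure the early-exit path (submission failure) releases the mutex before returning. Below is a minimal sketch of that control flow, not QEMU code, with a made-up submit_async() standing in for nfs_pread_async().

/* Minimal sketch (plain C + pthreads, hypothetical submit_async()) of the
 * submission pattern in nfs_co_preadv(): lock, queue the request, and make
 * sure every early-exit path drops the lock before returning. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct Client {
    pthread_mutex_t mutex;
    int pending;
} Client;

/* Stand-in for nfs_pread_async(); returns 0 on success, -1 on failure. */
static int submit_async(Client *client, bool fail)
{
    if (fail) {
        return -1;
    }
    client->pending++;
    return 0;
}

static int co_preadv_like(Client *client, bool make_it_fail)
{
    pthread_mutex_lock(&client->mutex);
    if (submit_async(client, make_it_fail) != 0) {
        pthread_mutex_unlock(&client->mutex);   /* unlock on the error path too */
        return -ENOMEM;
    }
    /* nfs_set_events(client) would run here, still under the lock */
    pthread_mutex_unlock(&client->mutex);
    /* ...then the coroutine would yield until the callback completes the task */
    return 0;
}

int main(void)
{
    Client c = { PTHREAD_MUTEX_INITIALIZER, 0 };
    int ok = co_preadv_like(&c, false);
    int err = co_preadv_like(&c, true);
    return (ok == 0 && err == -ENOMEM) ? 0 : 1;
}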