mirror of https://github.com/xqemu/xqemu.git
Merge remote-tracking branch 'remotes/cody/tags/block-pull-request' into staging

# gpg: Signature made Wed 11 Nov 2015 17:59:33 GMT using RSA key ID C0DE3057
# gpg: Good signature from "Jeffrey Cody <jcody@redhat.com>"
# gpg:                 aka "Jeffrey Cody <jeff@codyprime.org>"
# gpg:                 aka "Jeffrey Cody <codyprime@gmail.com>"

* remotes/cody/tags/block-pull-request:
  gluster: allocate GlusterAIOCBs on the stack

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 7497b8dddc
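The change is the same in every hunk below: each coroutine used to heap-allocate its completion record with g_slice_new() and free it on an out: label, and now keeps the GlusterAIOCB on its own stack. That is safe because the coroutine yields right after submitting the request and only runs again once the gluster callback has filled in acb.ret, so the stack frame outlives the asynchronous I/O. As a consequence the error path can return -errno directly and the out:/g_slice_free() cleanup disappears, which is exactly the shape of each + side. Below is a minimal sketch of the callback side, assuming only the fields visible in the diff (size, ret, coroutine, aio_context) and a simplified wake-up; the real gluster_finish_aiocb in block/gluster.c of that era re-entered the coroutine via a QEMU bottom half rather than the aio_co_schedule() helper used here for illustration.

#include <errno.h>
#include <glusterfs/api/glfs.h>   /* glfs_*_async(), struct glfs_fd */
#include "qemu/coroutine.h"       /* Coroutine */
#include "block/aio.h"            /* AioContext, aio_co_schedule() */

/* Illustrative sketch only: field names follow the diff; the actual struct
 * in block/gluster.c may carry more members. */
typedef struct GlusterAIOCB {
    int64_t size;            /* bytes submitted; 0 for flush/discard */
    int ret;                 /* filled in by the callback, read after the yield */
    Coroutine *coroutine;    /* coroutine parked in qemu_coroutine_yield() */
    AioContext *aio_context; /* AioContext in which to resume it */
} GlusterAIOCB;

static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = arg;     /* points into the submitting coroutine's stack */

    if (!ret || ret == acb->size) {
        acb->ret = 0;            /* success, or the full transfer completed */
    } else if (ret < 0) {
        acb->ret = -errno;       /* the request failed */
    } else {
        acb->ret = -EIO;         /* short transfer: report an I/O error */
    }
    /* Wake the coroutine; it resumes after qemu_coroutine_yield() and returns
     * acb.ret while the stack slot is still live.  aio_co_schedule() stands in
     * here for the bottom-half hop the real code performs. */
    aio_co_schedule(acb->aio_context, acb->coroutine);
}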
block/gluster.c
@@ -429,28 +429,23 @@ static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
     off_t size = nb_sectors * BDRV_SECTOR_SIZE;
     off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    acb->size = size;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = size;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
-    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
+    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 
 static inline bool gluster_supports_zerofill(void)
@@ -541,35 +536,30 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
     size_t size = nb_sectors * BDRV_SECTOR_SIZE;
     off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    acb->size = size;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = size;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
-            &gluster_finish_aiocb, acb);
+            gluster_finish_aiocb, &acb);
     } else {
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
-            &gluster_finish_aiocb, acb);
+            gluster_finish_aiocb, &acb);
     }
 
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 
 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
@@ -600,26 +590,21 @@ static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
 
-    acb->size = 0;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = 0;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
-    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
+    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 
 #ifdef CONFIG_GLUSTERFS_DISCARD
@@ -627,28 +612,23 @@ static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors)
 {
     int ret;
-    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+    GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
     size_t size = nb_sectors * BDRV_SECTOR_SIZE;
     off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
-    acb->size = 0;
-    acb->ret = 0;
-    acb->coroutine = qemu_coroutine_self();
-    acb->aio_context = bdrv_get_aio_context(bs);
+    acb.size = 0;
+    acb.ret = 0;
+    acb.coroutine = qemu_coroutine_self();
+    acb.aio_context = bdrv_get_aio_context(bs);
 
-    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
+    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-        ret = -errno;
-        goto out;
+        return -errno;
     }
 
     qemu_coroutine_yield();
-    ret = acb->ret;
-
-out:
-    g_slice_free(GlusterAIOCB, acb);
-    return ret;
+    return acb.ret;
 }
 #endif
 
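For orientation, the + and context lines of the first hunk reassemble into the post-patch qemu_gluster_co_write_zeroes(); the other three hunks give the read/write, flush, and discard paths the same shape (stack-allocated acb, direct return -errno on submission failure, return acb.ret after the yield). Nothing below goes beyond what the hunk already shows:

static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}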