Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block layer patches:

- Generic background jobs
- qemu-iotests fixes for NFS and the 'migration' group
- sheepdog: Minor code simplification

# gpg: Signature made Wed 23 May 2018 13:33:49 BST
# gpg:                using RSA key 7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream: (46 commits)
  qemu-iotests: Test job-* with block jobs
  iotests: Move qmp_to_opts() to VM
  blockjob: Remove BlockJob.driver
  job: Add query-jobs QMP command
  job: Add lifecycle QMP commands
  job: Add JOB_STATUS_CHANGE QMP event
  job: Introduce qapi/job.json
  job: Move progress fields to Job
  job: Add job_transition_to_ready()
  job: Add job_is_ready()
  job: Add job_dismiss()
  job: Add job_yield()
  block: Cancel job in bdrv_close_all() callers
  job: Move completion and cancellation to Job
  job: Move transactions to Job
  job: Switch transactions to JobTxn
  job: Move job_finish_sync() to Job
  job: Move .complete callback to Job
  job: Add job_drain()
  job: Convert block_job_cancel_async() to Job
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
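
The commits above replace the block-job-specific QMP interface with generic job handling: qapi/job.json introduces the job type and status enums, query-jobs, the job-* lifecycle commands and the JOB_STATUS_CHANGE event. As a rough illustration of the new interface only (this exchange is not taken from the commit; the job id "job0" and the progress numbers are invented, while the command and field names are the ones added by qapi/job.json), a QMP client could now inspect and pause a running background job like this:

    -> { "execute": "query-jobs" }
    <- { "return": [ { "id": "job0", "type": "backup", "status": "running",
                       "current-progress": 4096, "total-progress": 1073741824 } ] }
    -> { "execute": "job-pause", "arguments": { "id": "job0" } }
    <- { "event": "JOB_STATUS_CHANGE", "data": { "id": "job0", "status": "paused" } }
    <- { "return": {} }

The older block-job-* commands keep working, but as the blockdev.c diff below shows, they now forward to the same generic Job infrastructure.
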
Peter Maydell 2018-05-24 13:24:22 +01:00
commit 37cbe4da61
58 changed files with 3607 additions and 1903 deletions

MAINTAINERS

@@ -1369,10 +1369,14 @@ L: qemu-block@nongnu.org
 S: Supported
 F: blockjob.c
 F: include/block/blockjob.h
+F: job.c
+F: job-qmp.c
+F: include/block/job.h
 F: block/backup.c
 F: block/commit.c
 F: block/stream.c
 F: block/mirror.c
+F: qapi/job.json
 T: git git://github.com/codyprime/qemu-kvm-jtc.git block

 Block QAPI, monitor, command line

Makefile

@@ -98,6 +98,7 @@ GENERATED_FILES += qapi/qapi-types-char.h qapi/qapi-types-char.c
 GENERATED_FILES += qapi/qapi-types-common.h qapi/qapi-types-common.c
 GENERATED_FILES += qapi/qapi-types-crypto.h qapi/qapi-types-crypto.c
 GENERATED_FILES += qapi/qapi-types-introspect.h qapi/qapi-types-introspect.c
+GENERATED_FILES += qapi/qapi-types-job.h qapi/qapi-types-job.c
 GENERATED_FILES += qapi/qapi-types-migration.h qapi/qapi-types-migration.c
 GENERATED_FILES += qapi/qapi-types-misc.h qapi/qapi-types-misc.c
 GENERATED_FILES += qapi/qapi-types-net.h qapi/qapi-types-net.c
@@ -116,6 +117,7 @@ GENERATED_FILES += qapi/qapi-visit-char.h qapi/qapi-visit-char.c
 GENERATED_FILES += qapi/qapi-visit-common.h qapi/qapi-visit-common.c
 GENERATED_FILES += qapi/qapi-visit-crypto.h qapi/qapi-visit-crypto.c
 GENERATED_FILES += qapi/qapi-visit-introspect.h qapi/qapi-visit-introspect.c
+GENERATED_FILES += qapi/qapi-visit-job.h qapi/qapi-visit-job.c
 GENERATED_FILES += qapi/qapi-visit-migration.h qapi/qapi-visit-migration.c
 GENERATED_FILES += qapi/qapi-visit-misc.h qapi/qapi-visit-misc.c
 GENERATED_FILES += qapi/qapi-visit-net.h qapi/qapi-visit-net.c
@@ -133,6 +135,7 @@ GENERATED_FILES += qapi/qapi-commands-char.h qapi/qapi-commands-char.c
 GENERATED_FILES += qapi/qapi-commands-common.h qapi/qapi-commands-common.c
 GENERATED_FILES += qapi/qapi-commands-crypto.h qapi/qapi-commands-crypto.c
 GENERATED_FILES += qapi/qapi-commands-introspect.h qapi/qapi-commands-introspect.c
+GENERATED_FILES += qapi/qapi-commands-job.h qapi/qapi-commands-job.c
 GENERATED_FILES += qapi/qapi-commands-migration.h qapi/qapi-commands-migration.c
 GENERATED_FILES += qapi/qapi-commands-misc.h qapi/qapi-commands-misc.c
 GENERATED_FILES += qapi/qapi-commands-net.h qapi/qapi-commands-net.c
@@ -150,6 +153,7 @@ GENERATED_FILES += qapi/qapi-events-char.h qapi/qapi-events-char.c
 GENERATED_FILES += qapi/qapi-events-common.h qapi/qapi-events-common.c
 GENERATED_FILES += qapi/qapi-events-crypto.h qapi/qapi-events-crypto.c
 GENERATED_FILES += qapi/qapi-events-introspect.h qapi/qapi-events-introspect.c
+GENERATED_FILES += qapi/qapi-events-job.h qapi/qapi-events-job.c
 GENERATED_FILES += qapi/qapi-events-migration.h qapi/qapi-events-migration.c
 GENERATED_FILES += qapi/qapi-events-misc.h qapi/qapi-events-misc.c
 GENERATED_FILES += qapi/qapi-events-net.h qapi/qapi-events-net.c
@@ -582,6 +586,7 @@ qapi-modules = $(SRC_PATH)/qapi/qapi-schema.json $(SRC_PATH)/qapi/common.json \
               $(SRC_PATH)/qapi/char.json \
               $(SRC_PATH)/qapi/crypto.json \
               $(SRC_PATH)/qapi/introspect.json \
+              $(SRC_PATH)/qapi/job.json \
               $(SRC_PATH)/qapi/migration.json \
               $(SRC_PATH)/qapi/misc.json \
               $(SRC_PATH)/qapi/net.json \
@@ -601,6 +606,7 @@ qapi/qapi-types-char.c qapi/qapi-types-char.h \
 qapi/qapi-types-common.c qapi/qapi-types-common.h \
 qapi/qapi-types-crypto.c qapi/qapi-types-crypto.h \
 qapi/qapi-types-introspect.c qapi/qapi-types-introspect.h \
+qapi/qapi-types-job.c qapi/qapi-types-job.h \
 qapi/qapi-types-migration.c qapi/qapi-types-migration.h \
 qapi/qapi-types-misc.c qapi/qapi-types-misc.h \
 qapi/qapi-types-net.c qapi/qapi-types-net.h \
@@ -619,6 +625,7 @@ qapi/qapi-visit-char.c qapi/qapi-visit-char.h \
 qapi/qapi-visit-common.c qapi/qapi-visit-common.h \
 qapi/qapi-visit-crypto.c qapi/qapi-visit-crypto.h \
 qapi/qapi-visit-introspect.c qapi/qapi-visit-introspect.h \
+qapi/qapi-visit-job.c qapi/qapi-visit-job.h \
 qapi/qapi-visit-migration.c qapi/qapi-visit-migration.h \
 qapi/qapi-visit-misc.c qapi/qapi-visit-misc.h \
 qapi/qapi-visit-net.c qapi/qapi-visit-net.h \
@@ -636,6 +643,7 @@ qapi/qapi-commands-char.c qapi/qapi-commands-char.h \
 qapi/qapi-commands-common.c qapi/qapi-commands-common.h \
 qapi/qapi-commands-crypto.c qapi/qapi-commands-crypto.h \
 qapi/qapi-commands-introspect.c qapi/qapi-commands-introspect.h \
+qapi/qapi-commands-job.c qapi/qapi-commands-job.h \
 qapi/qapi-commands-migration.c qapi/qapi-commands-migration.h \
 qapi/qapi-commands-misc.c qapi/qapi-commands-misc.h \
 qapi/qapi-commands-net.c qapi/qapi-commands-net.h \
@@ -653,6 +661,7 @@ qapi/qapi-events-char.c qapi/qapi-events-char.h \
 qapi/qapi-events-common.c qapi/qapi-events-common.h \
 qapi/qapi-events-crypto.c qapi/qapi-events-crypto.h \
 qapi/qapi-events-introspect.c qapi/qapi-events-introspect.h \
+qapi/qapi-events-job.c qapi/qapi-events-job.h \
 qapi/qapi-events-migration.c qapi/qapi-events-migration.h \
 qapi/qapi-events-misc.c qapi/qapi-events-misc.h \
 qapi/qapi-events-net.c qapi/qapi-events-net.h \

Makefile.objs

@@ -10,6 +10,7 @@ util-obj-y += qapi/qapi-types-char.o
 util-obj-y += qapi/qapi-types-common.o
 util-obj-y += qapi/qapi-types-crypto.o
 util-obj-y += qapi/qapi-types-introspect.o
+util-obj-y += qapi/qapi-types-job.o
 util-obj-y += qapi/qapi-types-migration.o
 util-obj-y += qapi/qapi-types-misc.o
 util-obj-y += qapi/qapi-types-net.o
@@ -28,6 +29,7 @@ util-obj-y += qapi/qapi-visit-char.o
 util-obj-y += qapi/qapi-visit-common.o
 util-obj-y += qapi/qapi-visit-crypto.o
 util-obj-y += qapi/qapi-visit-introspect.o
+util-obj-y += qapi/qapi-visit-job.o
 util-obj-y += qapi/qapi-visit-migration.o
 util-obj-y += qapi/qapi-visit-misc.o
 util-obj-y += qapi/qapi-visit-net.o
@@ -45,6 +47,7 @@ util-obj-y += qapi/qapi-events-char.o
 util-obj-y += qapi/qapi-events-common.o
 util-obj-y += qapi/qapi-events-crypto.o
 util-obj-y += qapi/qapi-events-introspect.o
+util-obj-y += qapi/qapi-events-job.o
 util-obj-y += qapi/qapi-events-migration.o
 util-obj-y += qapi/qapi-events-misc.o
 util-obj-y += qapi/qapi-events-net.o
@@ -63,7 +66,7 @@ chardev-obj-y = chardev/

 # block-obj-y is code used by both qemu system emulation and qemu-img
 block-obj-y += nbd/
-block-obj-y += block.o blockjob.o
+block-obj-y += block.o blockjob.o job.o
 block-obj-y += block/ scsi/
 block-obj-y += qemu-io-cmds.o
 block-obj-$(CONFIG_REPLICATION) += replication.o
@@ -94,6 +97,7 @@ io-obj-y = io/
 ifeq ($(CONFIG_SOFTMMU),y)
 common-obj-y = blockdev.o blockdev-nbd.o block/
 common-obj-y += bootdevice.o iothread.o
+common-obj-y += job-qmp.o
 common-obj-y += net/
 common-obj-y += qdev-monitor.o device-hotplug.o
 common-obj-$(CONFIG_WIN32) += os-win32.o
@@ -140,6 +144,7 @@ common-obj-y += qapi/qapi-commands-char.o
 common-obj-y += qapi/qapi-commands-common.o
 common-obj-y += qapi/qapi-commands-crypto.o
 common-obj-y += qapi/qapi-commands-introspect.o
+common-obj-y += qapi/qapi-commands-job.o
 common-obj-y += qapi/qapi-commands-migration.o
 common-obj-y += qapi/qapi-commands-misc.o
 common-obj-y += qapi/qapi-commands-net.o

block.c

@@ -3362,7 +3362,7 @@ static void bdrv_close(BlockDriverState *bs)
 void bdrv_close_all(void)
 {
-    block_job_cancel_sync_all();
+    assert(job_next(NULL) == NULL);
     nbd_export_close_all();

     /* Drop references from requests still in flight, such as canceled block

block/backup.c

@@ -160,7 +160,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
          * offset field is an opaque progress value, it is not a disk offset.
          */
         job->bytes_read += n;
-        block_job_progress_update(&job->common, n);
+        job_progress_update(&job->common.job, n);
     }

 out:
@@ -207,25 +207,25 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
     }
 }

-static void backup_commit(BlockJob *job)
+static void backup_commit(Job *job)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     if (s->sync_bitmap) {
         backup_cleanup_sync_bitmap(s, 0);
     }
 }

-static void backup_abort(BlockJob *job)
+static void backup_abort(Job *job)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     if (s->sync_bitmap) {
         backup_cleanup_sync_bitmap(s, -1);
     }
 }

-static void backup_clean(BlockJob *job)
+static void backup_clean(Job *job)
 {
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     assert(s->target);
     blk_unref(s->target);
     s->target = NULL;
@@ -317,11 +317,11 @@ typedef struct {
     int ret;
 } BackupCompleteData;

-static void backup_complete(BlockJob *job, void *opaque)
+static void backup_complete(Job *job, void *opaque)
 {
     BackupCompleteData *data = opaque;

-    block_job_completed(job, data->ret);
+    job_completed(job, data->ret);
     g_free(data);
 }

@@ -329,7 +329,7 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 {
     uint64_t delay_ns;

-    if (block_job_is_cancelled(&job->common)) {
+    if (job_is_cancelled(&job->common.job)) {
         return true;
     }

@@ -337,9 +337,9 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
      * return. Without a yield, the VM would not reboot. */
     delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
     job->bytes_read = 0;
-    block_job_sleep_ns(&job->common, delay_ns);
+    job_sleep_ns(&job->common.job, delay_ns);

-    if (block_job_is_cancelled(&job->common)) {
+    if (job_is_cancelled(&job->common.job)) {
         return true;
     }

@@ -406,8 +406,8 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
         bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
     }

-    /* TODO block_job_progress_set_remaining() would make more sense */
-    block_job_progress_update(&job->common,
+    /* TODO job_progress_set_remaining() would make more sense */
+    job_progress_update(&job->common.job,
         job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);

     bdrv_dirty_iter_free(dbi);
@@ -425,7 +425,7 @@ static void coroutine_fn backup_run(void *opaque)
     qemu_co_rwlock_init(&job->flush_rwlock);

     nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
-    block_job_progress_set_remaining(&job->common, job->len);
+    job_progress_set_remaining(&job->common.job, job->len);

     job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
     if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
@@ -441,10 +441,10 @@ static void coroutine_fn backup_run(void *opaque)
     if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /* All bits are set in copy_bitmap to allow any cluster to be copied.
          * This does not actually require them to be copied. */
-        while (!block_job_is_cancelled(&job->common)) {
+        while (!job_is_cancelled(&job->common.job)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
-            block_job_yield(&job->common);
+            job_yield(&job->common.job);
         }
     } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
         ret = backup_run_incremental(job);
@@ -519,16 +519,21 @@ static void coroutine_fn backup_run(void *opaque)
     data = g_malloc(sizeof(*data));
     data->ret = ret;
-    block_job_defer_to_main_loop(&job->common, backup_complete, data);
+    job_defer_to_main_loop(&job->common.job, backup_complete, data);
 }

 static const BlockJobDriver backup_job_driver = {
-    .instance_size = sizeof(BackupBlockJob),
-    .job_type = BLOCK_JOB_TYPE_BACKUP,
-    .start = backup_run,
-    .commit = backup_commit,
-    .abort = backup_abort,
-    .clean = backup_clean,
+    .job_driver = {
+        .instance_size = sizeof(BackupBlockJob),
+        .job_type = JOB_TYPE_BACKUP,
+        .free = block_job_free,
+        .user_resume = block_job_user_resume,
+        .drain = block_job_drain,
+        .start = backup_run,
+        .commit = backup_commit,
+        .abort = backup_abort,
+        .clean = backup_clean,
+    },
     .attached_aio_context = backup_attached_aio_context,
     .drain = backup_drain,
 };
@@ -541,7 +546,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                             BlockdevOnError on_target_error,
                             int creation_flags,
                             BlockCompletionFunc *cb, void *opaque,
-                            BlockJobTxn *txn, Error **errp)
+                            JobTxn *txn, Error **errp)
 {
     int64_t len;
     BlockDriverInfo bdi;
@@ -673,8 +678,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
     }
     if (job) {
-        backup_clean(&job->common);
-        block_job_early_fail(&job->common);
+        backup_clean(&job->common.job);
+        job_early_fail(&job->common.job);
     }

     return NULL;

block/commit.c

@@ -72,9 +72,10 @@ typedef struct {
     int ret;
 } CommitCompleteData;

-static void commit_complete(BlockJob *job, void *opaque)
+static void commit_complete(Job *job, void *opaque)
 {
-    CommitBlockJob *s = container_of(job, CommitBlockJob, common);
+    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
+    BlockJob *bjob = &s->common;
     CommitCompleteData *data = opaque;
     BlockDriverState *top = blk_bs(s->top);
     BlockDriverState *base = blk_bs(s->base);
@@ -90,7 +91,7 @@ static void commit_complete(BlockJob *job, void *opaque)
      * the normal backing chain can be restored. */
     blk_unref(s->base);

-    if (!block_job_is_cancelled(&s->common) && ret == 0) {
+    if (!job_is_cancelled(job) && ret == 0) {
         /* success */
         ret = bdrv_drop_intermediate(s->commit_top_bs, base,
                                      s->backing_file_str);
@@ -111,12 +112,12 @@ static void commit_complete(BlockJob *job, void *opaque)
     blk_unref(s->top);

     /* If there is more than one reference to the job (e.g. if called from
-     * block_job_finish_sync()), block_job_completed() won't free it and
-     * therefore the blockers on the intermediate nodes remain. This would
-     * cause bdrv_set_backing_hd() to fail. */
-    block_job_remove_all_bdrv(job);
+     * job_finish_sync()), job_completed() won't free it and therefore the
+     * blockers on the intermediate nodes remain. This would cause
+     * bdrv_set_backing_hd() to fail. */
+    block_job_remove_all_bdrv(bjob);

-    block_job_completed(&s->common, ret);
+    job_completed(job, ret);
     g_free(data);

     /* If bdrv_drop_intermediate() didn't already do that, remove the commit
@@ -149,7 +150,7 @@ static void coroutine_fn commit_run(void *opaque)
     if (len < 0) {
         goto out;
     }
-    block_job_progress_set_remaining(&s->common, len);
+    job_progress_set_remaining(&s->common.job, len);

     ret = base_len = blk_getlength(s->base);
     if (base_len < 0) {
@@ -171,8 +172,8 @@ static void coroutine_fn commit_run(void *opaque)
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, delay_ns);
-        if (block_job_is_cancelled(&s->common)) {
+        job_sleep_ns(&s->common.job, delay_ns);
+        if (job_is_cancelled(&s->common.job)) {
             break;
         }
         /* Copy if allocated above the base */
@@ -195,7 +196,7 @@ static void coroutine_fn commit_run(void *opaque)
             }
         }
         /* Publish progress */
-        block_job_progress_update(&s->common, n);
+        job_progress_update(&s->common.job, n);

         if (copy) {
             delay_ns = block_job_ratelimit_get_delay(&s->common, n);
@@ -211,13 +212,18 @@ out:
     data = g_malloc(sizeof(*data));
     data->ret = ret;
-    block_job_defer_to_main_loop(&s->common, commit_complete, data);
+    job_defer_to_main_loop(&s->common.job, commit_complete, data);
 }

 static const BlockJobDriver commit_job_driver = {
-    .instance_size = sizeof(CommitBlockJob),
-    .job_type = BLOCK_JOB_TYPE_COMMIT,
-    .start = commit_run,
+    .job_driver = {
+        .instance_size = sizeof(CommitBlockJob),
+        .job_type = JOB_TYPE_COMMIT,
+        .free = block_job_free,
+        .user_resume = block_job_user_resume,
+        .drain = block_job_drain,
+        .start = commit_run,
+    },
 };

 static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
@@ -277,7 +283,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     }

     s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
-                         speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
+                         speed, JOB_DEFAULT, NULL, NULL, errp);
     if (!s) {
         return;
     }
@@ -367,7 +373,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     s->on_error = on_error;

     trace_commit_start(bs, base, top, s);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;

 fail:
@@ -380,7 +386,7 @@ fail:
     if (commit_top_bs) {
         bdrv_replace_node(commit_top_bs, top, &error_abort);
     }
-    block_job_early_fail(&s->common);
+    job_early_fail(&s->common.job);
 }

block/mirror.c

@@ -119,14 +119,14 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
             bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
         }
         if (!s->initial_zeroing_ongoing) {
-            block_job_progress_update(&s->common, op->bytes);
+            job_progress_update(&s->common.job, op->bytes);
         }
     }
     qemu_iovec_destroy(&op->qiov);
     g_free(op);

     if (s->waiting_for_io) {
-        qemu_coroutine_enter(s->common.co);
+        qemu_coroutine_enter(s->common.job.co);
     }
 }
@@ -345,7 +345,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         mirror_wait_for_io(s);
     }

-    block_job_pause_point(&s->common);
+    job_pause_point(&s->common.job);

     /* Find the number of consective dirty chunks following the first dirty
      * one, and wait for in flight requests in them. */
@@ -484,9 +484,10 @@ typedef struct {
     int ret;
 } MirrorExitData;

-static void mirror_exit(BlockJob *job, void *opaque)
+static void mirror_exit(Job *job, void *opaque)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
+    BlockJob *bjob = &s->common;
     MirrorExitData *data = opaque;
     AioContext *replace_aio_context = NULL;
     BlockDriverState *src = s->source;
@@ -497,7 +498,7 @@ static void mirror_exit(BlockJob *job, void *opaque)
     bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

     /* Make sure that the source BDS doesn't go away before we called
-     * block_job_completed(). */
+     * job_completed(). */
     bdrv_ref(src);
     bdrv_ref(mirror_top_bs);
     bdrv_ref(target_bs);
@@ -568,7 +569,7 @@ static void mirror_exit(BlockJob *job, void *opaque)
      * the blockers on the intermediate nodes so that the resulting state is
      * valid. Also give up permissions on mirror_top_bs->backing, which might
      * block the removal. */
-    block_job_remove_all_bdrv(job);
+    block_job_remove_all_bdrv(bjob);
     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                             &error_abort);
     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
@@ -576,11 +577,11 @@ static void mirror_exit(BlockJob *job, void *opaque)
     /* We just changed the BDS the job BB refers to (with either or both of the
      * bdrv_replace_node() calls), so switch the BB back so the cleanup does
      * the right thing. We don't need any permissions any more now. */
-    blk_remove_bs(job->blk);
-    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
-    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);
+    blk_remove_bs(bjob->blk);
+    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
+    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

-    block_job_completed(&s->common, data->ret);
+    job_completed(job, data->ret);

     g_free(data);
     bdrv_drained_end(src);
@@ -594,9 +595,9 @@ static void mirror_throttle(MirrorBlockJob *s)
     if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
         s->last_pause_ns = now;
-        block_job_sleep_ns(&s->common, 0);
+        job_sleep_ns(&s->common.job, 0);
     } else {
-        block_job_pause_point(&s->common);
+        job_pause_point(&s->common.job);
     }
 }
@@ -622,7 +623,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
         mirror_throttle(s);

-        if (block_job_is_cancelled(&s->common)) {
+        if (job_is_cancelled(&s->common.job)) {
             s->initial_zeroing_ongoing = false;
             return 0;
         }
@@ -650,7 +651,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
         mirror_throttle(s);

-        if (block_job_is_cancelled(&s->common)) {
+        if (job_is_cancelled(&s->common.job)) {
             return 0;
         }
@@ -695,7 +696,7 @@ static void coroutine_fn mirror_run(void *opaque)
                                  checking for a NULL string */
     int ret = 0;

-    if (block_job_is_cancelled(&s->common)) {
+    if (job_is_cancelled(&s->common.job)) {
         goto immediate_exit;
     }
@@ -726,13 +727,13 @@ static void coroutine_fn mirror_run(void *opaque)
     }

     if (s->bdev_length == 0) {
-        /* Report BLOCK_JOB_READY and wait for complete. */
-        block_job_event_ready(&s->common);
+        /* Transition to the READY state and wait for complete. */
+        job_transition_to_ready(&s->common.job);
         s->synced = true;
-        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
-            block_job_yield(&s->common);
+        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
+            job_yield(&s->common.job);
         }
-        s->common.cancelled = false;
+        s->common.job.cancelled = false;
         goto immediate_exit;
     }
@@ -768,7 +769,7 @@ static void coroutine_fn mirror_run(void *opaque)
     s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     if (!s->is_none_mode) {
         ret = mirror_dirty_init(s);
-        if (ret < 0 || block_job_is_cancelled(&s->common)) {
+        if (ret < 0 || job_is_cancelled(&s->common.job)) {
             goto immediate_exit;
         }
     }
@@ -785,13 +786,13 @@ static void coroutine_fn mirror_run(void *opaque)
             goto immediate_exit;
         }

-        block_job_pause_point(&s->common);
+        job_pause_point(&s->common.job);

         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
          * the number of bytes currently being processed; together those are
          * the current remaining operation length */
-        block_job_progress_set_remaining(&s->common, s->bytes_in_flight + cnt);
+        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

         /* Note that even when no rate limit is applied we need to yield
          * periodically with no pending I/O so that bdrv_drain_all() returns.
@@ -823,12 +824,12 @@ static void coroutine_fn mirror_run(void *opaque)
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
-                block_job_event_ready(&s->common);
+                job_transition_to_ready(&s->common.job);
                s->synced = true;
            }

            should_complete = s->should_complete ||
-                block_job_is_cancelled(&s->common);
+                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }
@@ -856,7 +857,7 @@ static void coroutine_fn mirror_run(void *opaque)
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
-           s->common.cancelled = false;
+           s->common.job.cancelled = false;
            need_drain = false;
            break;
        }
@@ -868,9 +869,9 @@ static void coroutine_fn mirror_run(void *opaque)
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
-        block_job_sleep_ns(&s->common, delay_ns);
-        if (block_job_is_cancelled(&s->common) &&
-            (!s->synced || s->common.force))
+        job_sleep_ns(&s->common.job, delay_ns);
+        if (job_is_cancelled(&s->common.job) &&
+            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
@@ -883,8 +884,8 @@ immediate_exit:
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
-        assert(ret < 0 || ((s->common.force || !s->synced) &&
-                           block_job_is_cancelled(&s->common)));
+        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
+                           job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }
@@ -901,12 +902,12 @@ immediate_exit:
     if (need_drain) {
         bdrv_drained_begin(bs);
     }
-    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
+    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
 }

-static void mirror_complete(BlockJob *job, Error **errp)
+static void mirror_complete(Job *job, Error **errp)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
     BlockDriverState *target;

     target = blk_bs(s->target);
@@ -953,12 +954,12 @@ static void mirror_complete(BlockJob *job, Error **errp)
     }

     s->should_complete = true;
-    block_job_enter(&s->common);
+    job_enter(job);
 }

-static void mirror_pause(BlockJob *job)
+static void mirror_pause(Job *job)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

     mirror_wait_for_all_io(s);
 }
@@ -986,21 +987,31 @@ static void mirror_drain(BlockJob *job)
 }

 static const BlockJobDriver mirror_job_driver = {
-    .instance_size = sizeof(MirrorBlockJob),
-    .job_type = BLOCK_JOB_TYPE_MIRROR,
-    .start = mirror_run,
-    .complete = mirror_complete,
-    .pause = mirror_pause,
+    .job_driver = {
+        .instance_size = sizeof(MirrorBlockJob),
+        .job_type = JOB_TYPE_MIRROR,
+        .free = block_job_free,
+        .user_resume = block_job_user_resume,
+        .drain = block_job_drain,
+        .start = mirror_run,
+        .pause = mirror_pause,
+        .complete = mirror_complete,
+    },
     .attached_aio_context = mirror_attached_aio_context,
     .drain = mirror_drain,
 };

 static const BlockJobDriver commit_active_job_driver = {
-    .instance_size = sizeof(MirrorBlockJob),
-    .job_type = BLOCK_JOB_TYPE_COMMIT,
-    .start = mirror_run,
-    .complete = mirror_complete,
-    .pause = mirror_pause,
+    .job_driver = {
+        .instance_size = sizeof(MirrorBlockJob),
+        .job_type = JOB_TYPE_COMMIT,
+        .free = block_job_free,
+        .user_resume = block_job_user_resume,
+        .drain = block_job_drain,
+        .start = mirror_run,
+        .pause = mirror_pause,
+        .complete = mirror_complete,
+    },
     .attached_aio_context = mirror_attached_aio_context,
     .drain = mirror_drain,
 };
@@ -1237,7 +1248,7 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
     }

     trace_mirror_start(bs, s, opaque);
-    block_job_start(&s->common);
+    job_start(&s->common.job);

     return;

 fail:
@@ -1248,7 +1259,7 @@ fail:
         g_free(s->replaces);
         blk_unref(s->target);
-        block_job_early_fail(&s->common);
+        job_early_fail(&s->common.job);
     }

     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
@@ -1275,7 +1286,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
     }
     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
-    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
+    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                      speed, granularity, buf_size, backing_mode,
                      on_source_error, on_target_error, unmap, NULL, NULL,
                      &mirror_job_driver, is_none_mode, base, false,

block/replication.c

@@ -145,7 +145,7 @@ static void replication_close(BlockDriverState *bs)
         replication_stop(s->rs, false, NULL);
     }
     if (s->stage == BLOCK_REPLICATION_FAILOVER) {
-        block_job_cancel_sync(s->active_disk->bs->job);
+        job_cancel_sync(&s->active_disk->bs->job->job);
     }

     if (s->mode == REPLICATION_MODE_SECONDARY) {
@@ -568,7 +568,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         job = backup_job_create(NULL, s->secondary_disk->bs, s->hidden_disk->bs,
                                 0, MIRROR_SYNC_MODE_NONE, NULL, false,
                                 BLOCKDEV_ON_ERROR_REPORT,
-                                BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL,
+                                BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
                                 backup_job_completed, bs, NULL, &local_err);
         if (local_err) {
             error_propagate(errp, local_err);
@@ -576,7 +576,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
             aio_context_release(aio_context);
             return;
         }
-        block_job_start(job);
+        job_start(&job->job);
         break;
     default:
         aio_context_release(aio_context);
@@ -681,7 +681,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
          * disk, secondary disk in backup_job_completed().
          */
        if (s->secondary_disk->bs->job) {
-            block_job_cancel_sync(s->secondary_disk->bs->job);
+            job_cancel_sync(&s->secondary_disk->bs->job->job);
        }

        if (!failover) {
@@ -693,7 +693,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
        s->stage = BLOCK_REPLICATION_FAILOVER;
        commit_active_start(NULL, s->active_disk->bs, s->secondary_disk->bs,
-                            BLOCK_JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
+                            JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT,
                            NULL, replication_done, bs, true, errp);
        break;
    default:

block/sheepdog.c

@@ -1859,9 +1859,7 @@ out:
         error_setg_errno(errp, -ret, "Can't pre-allocate");
     }
 out_with_err_set:
-    if (blk) {
-        blk_unref(blk);
-    }
+    blk_unref(blk);
     g_free(buf);

     return ret;

block/stream.c

@@ -58,16 +58,16 @@ typedef struct {
     int ret;
 } StreamCompleteData;

-static void stream_complete(BlockJob *job, void *opaque)
+static void stream_complete(Job *job, void *opaque)
 {
-    StreamBlockJob *s = container_of(job, StreamBlockJob, common);
+    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
+    BlockJob *bjob = &s->common;
     StreamCompleteData *data = opaque;
-    BlockDriverState *bs = blk_bs(job->blk);
+    BlockDriverState *bs = blk_bs(bjob->blk);
     BlockDriverState *base = s->base;
     Error *local_err = NULL;

-    if (!block_job_is_cancelled(&s->common) && bs->backing &&
-        data->ret == 0) {
+    if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
         const char *base_id = NULL, *base_fmt = NULL;
         if (base) {
             base_id = s->backing_file_str;
@@ -88,12 +88,12 @@ out:
     /* Reopen the image back in read-only mode if necessary */
     if (s->bs_flags != bdrv_get_flags(bs)) {
         /* Give up write permissions before making it read-only */
-        blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
+        blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
         bdrv_reopen(bs, s->bs_flags, NULL);
     }

     g_free(s->backing_file_str);
-    block_job_completed(&s->common, data->ret);
+    job_completed(job, data->ret);
     g_free(data);
 }
@@ -121,7 +121,7 @@ static void coroutine_fn stream_run(void *opaque)
         ret = len;
         goto out;
     }
-    block_job_progress_set_remaining(&s->common, len);
+    job_progress_set_remaining(&s->common.job, len);

     buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);
@@ -140,8 +140,8 @@ static void coroutine_fn stream_run(void *opaque)
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, delay_ns);
-        if (block_job_is_cancelled(&s->common)) {
+        job_sleep_ns(&s->common.job, delay_ns);
+        if (job_is_cancelled(&s->common.job)) {
             break;
         }
@@ -184,7 +184,7 @@ static void coroutine_fn stream_run(void *opaque)
         ret = 0;

         /* Publish progress */
-        block_job_progress_update(&s->common, n);
+        job_progress_update(&s->common.job, n);
         if (copy) {
             delay_ns = block_job_ratelimit_get_delay(&s->common, n);
         } else {
@@ -205,13 +205,18 @@ out:
     /* Modify backing chain and close BDSes in main loop */
     data = g_malloc(sizeof(*data));
     data->ret = ret;
-    block_job_defer_to_main_loop(&s->common, stream_complete, data);
+    job_defer_to_main_loop(&s->common.job, stream_complete, data);
 }

 static const BlockJobDriver stream_job_driver = {
-    .instance_size = sizeof(StreamBlockJob),
-    .job_type = BLOCK_JOB_TYPE_STREAM,
-    .start = stream_run,
+    .job_driver = {
+        .instance_size = sizeof(StreamBlockJob),
+        .job_type = JOB_TYPE_STREAM,
+        .free = block_job_free,
+        .start = stream_run,
+        .user_resume = block_job_user_resume,
+        .drain = block_job_drain,
+    },
 };

 void stream_start(const char *job_id, BlockDriverState *bs,
@@ -238,7 +243,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
                          BLK_PERM_GRAPH_MOD,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                          BLK_PERM_WRITE,
-                         speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
+                         speed, JOB_DEFAULT, NULL, NULL, errp);
     if (!s) {
         goto fail;
     }
@@ -259,7 +264,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
     s->on_error = on_error;

     trace_stream_start(bs, base, s);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;

 fail:

block/trace-events

@@ -4,11 +4,6 @@
 bdrv_open_common(void *bs, const char *filename, int flags, const char *format_name) "bs %p filename \"%s\" flags 0x%x format_name \"%s\""
 bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"

-# blockjob.c
-block_job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
-block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
-block_job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
-
 # block/block-backend.c
 blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
 blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"

blockdev.c

@@ -150,7 +150,7 @@ void blockdev_mark_auto_del(BlockBackend *blk)
     aio_context_acquire(aio_context);

     if (bs->job) {
-        block_job_cancel(bs->job, false);
+        job_cancel(&bs->job->job, false);
     }

     aio_context_release(aio_context);
@@ -1446,7 +1446,7 @@ typedef struct BlkActionOps {
 struct BlkActionState {
     TransactionAction *action;
     const BlkActionOps *ops;
-    BlockJobTxn *block_job_txn;
+    JobTxn *block_job_txn;
     TransactionProperties *txn_props;
     QSIMPLEQ_ENTRY(BlkActionState) entry;
 };
@@ -1864,7 +1864,7 @@ typedef struct DriveBackupState {
     BlockJob *job;
 } DriveBackupState;

-static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
+static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
                                  Error **errp);

 static void drive_backup_prepare(BlkActionState *common, Error **errp)
@@ -1910,7 +1910,7 @@ static void drive_backup_commit(BlkActionState *common)
     aio_context_acquire(aio_context);

     assert(state->job);
-    block_job_start(state->job);
+    job_start(&state->job->job);

     aio_context_release(aio_context);
 }
@@ -1925,7 +1925,7 @@ static void drive_backup_abort(BlkActionState *common)
     aio_context = bdrv_get_aio_context(state->bs);
     aio_context_acquire(aio_context);

-    block_job_cancel_sync(state->job);
+    job_cancel_sync(&state->job->job);

     aio_context_release(aio_context);
 }
@@ -1954,7 +1954,7 @@ typedef struct BlockdevBackupState {
     BlockJob *job;
 } BlockdevBackupState;

-static BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
+static BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
                                     Error **errp);

 static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
@@ -2008,7 +2008,7 @@ static void blockdev_backup_commit(BlkActionState *common)
     aio_context_acquire(aio_context);

     assert(state->job);
-    block_job_start(state->job);
+    job_start(&state->job->job);

     aio_context_release(aio_context);
 }
@@ -2023,7 +2023,7 @@ static void blockdev_backup_abort(BlkActionState *common)
     aio_context = bdrv_get_aio_context(state->bs);
     aio_context_acquire(aio_context);

-    block_job_cancel_sync(state->job);
+    job_cancel_sync(&state->job->job);

     aio_context_release(aio_context);
 }
@@ -2243,7 +2243,7 @@ void qmp_transaction(TransactionActionList *dev_list,
                      Error **errp)
 {
     TransactionActionList *dev_entry = dev_list;
-    BlockJobTxn *block_job_txn = NULL;
+    JobTxn *block_job_txn = NULL;
     BlkActionState *state, *next;
     Error *local_err = NULL;
@@ -2251,11 +2251,11 @@ void qmp_transaction(TransactionActionList *dev_list,
     QSIMPLEQ_INIT(&snap_bdrv_states);

     /* Does this transaction get canceled as a group on failure?
-     * If not, we don't really need to make a BlockJobTxn.
+     * If not, we don't really need to make a JobTxn.
      */
     props = get_transaction_properties(props);
     if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
-        block_job_txn = block_job_txn_new();
+        block_job_txn = job_txn_new();
     }

     /* drain all i/o before any operations */
@@ -2314,7 +2314,7 @@ exit:
     if (!has_props) {
         qapi_free_TransactionProperties(props);
     }
-    block_job_txn_unref(block_job_txn);
+    job_txn_unref(block_job_txn);
 }

 void qmp_eject(bool has_device, const char *device,
@@ -3244,7 +3244,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
             goto out;
         }
         commit_active_start(has_job_id ? job_id : NULL, bs, base_bs,
-                            BLOCK_JOB_DEFAULT, speed, on_error,
+                            JOB_DEFAULT, speed, on_error,
                             filter_node_name, NULL, NULL, false, &local_err);
     } else {
         BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
@@ -3264,7 +3264,7 @@ out:
     aio_context_release(aio_context);
 }

-static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
+static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
                                  Error **errp)
 {
     BlockDriverState *bs;
@@ -3275,7 +3275,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
     AioContext *aio_context;
     QDict *options = NULL;
     Error *local_err = NULL;
-    int flags, job_flags = BLOCK_JOB_DEFAULT;
+    int flags, job_flags = JOB_DEFAULT;
     int64_t size;
     bool set_backing_hd = false;
@@ -3398,10 +3398,10 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
         }
     }
     if (!backup->auto_finalize) {
-        job_flags |= BLOCK_JOB_MANUAL_FINALIZE;
+        job_flags |= JOB_MANUAL_FINALIZE;
     }
     if (!backup->auto_dismiss) {
-        job_flags |= BLOCK_JOB_MANUAL_DISMISS;
+        job_flags |= JOB_MANUAL_DISMISS;
     }

     job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
@@ -3425,7 +3425,7 @@ void qmp_drive_backup(DriveBackup *arg, Error **errp)
     BlockJob *job;
     job = do_drive_backup(arg, NULL, errp);
     if (job) {
-        block_job_start(job);
+        job_start(&job->job);
     }
 }
@@ -3434,7 +3434,7 @@ BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
     return bdrv_named_nodes_list(errp);
 }

-BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
+BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
                              Error **errp)
 {
     BlockDriverState *bs;
@@ -3442,7 +3442,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
     Error *local_err = NULL;
     AioContext *aio_context;
     BlockJob *job = NULL;
-    int job_flags = BLOCK_JOB_DEFAULT;
+    int job_flags = JOB_DEFAULT;

     if (!backup->has_speed) {
         backup->speed = 0;
@@ -3491,10 +3491,10 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
         }
     }
     if (!backup->auto_finalize) {
-        job_flags |= BLOCK_JOB_MANUAL_FINALIZE;
+        job_flags |= JOB_MANUAL_FINALIZE;
     }
     if (!backup->auto_dismiss) {
-        job_flags |= BLOCK_JOB_MANUAL_DISMISS;
+        job_flags |= JOB_MANUAL_DISMISS;
     }
     job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
                             backup->sync, NULL, backup->compress,
@@ -3513,7 +3513,7 @@ void qmp_blockdev_backup(BlockdevBackup *arg, Error **errp)
     BlockJob *job;
     job = do_blockdev_backup(arg, NULL, errp);
     if (job) {
-        block_job_start(job);
+        job_start(&job->job);
     }
 }
@@ -3844,14 +3844,14 @@ void qmp_block_job_cancel(const char *device,
         force = false;
     }

-    if (block_job_user_paused(job) && !force) {
+    if (job_user_paused(&job->job) && !force) {
         error_setg(errp, "The block job for device '%s' is currently paused",
                    device);
         goto out;
     }

     trace_qmp_block_job_cancel(job);
-    block_job_user_cancel(job, force, errp);
+    job_user_cancel(&job->job, force, errp);
 out:
     aio_context_release(aio_context);
 }
@@ -3866,7 +3866,7 @@ void qmp_block_job_pause(const char *device, Error **errp)
     }

     trace_qmp_block_job_pause(job);
-    block_job_user_pause(job, errp);
+    job_user_pause(&job->job, errp);
     aio_context_release(aio_context);
 }
@@ -3880,7 +3880,7 @@ void qmp_block_job_resume(const char *device, Error **errp)
     }

     trace_qmp_block_job_resume(job);
-    block_job_user_resume(job, errp);
+    job_user_resume(&job->job, errp);
     aio_context_release(aio_context);
 }
@@ -3894,7 +3894,7 @@ void qmp_block_job_complete(const char *device, Error **errp)
     }

     trace_qmp_block_job_complete(job);
-    block_job_complete(job, errp);
+    job_complete(&job->job, errp);
     aio_context_release(aio_context);
 }
@@ -3908,21 +3908,23 @@ void qmp_block_job_finalize(const char *id, Error **errp)
     }

     trace_qmp_block_job_finalize(job);
-    block_job_finalize(job, errp);
+    job_finalize(&job->job, errp);
     aio_context_release(aio_context);
 }

 void qmp_block_job_dismiss(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(id, &aio_context, errp);
+    BlockJob *bjob = find_block_job(id, &aio_context, errp);
+    Job *job;

-    if (!job) {
+    if (!bjob) {
         return;
     }

-    trace_qmp_block_job_dismiss(job);
-    block_job_dismiss(&job, errp);
+    trace_qmp_block_job_dismiss(bjob);
+    job = &bjob->job;
+    job_dismiss(&job, errp);
     aio_context_release(aio_context);
 }

blockjob.c: 1102 changed lines (file diff suppressed because it is too large)

View File

@ -1029,7 +1029,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BlockdevOnError on_target_error, BlockdevOnError on_target_error,
int creation_flags, int creation_flags,
BlockCompletionFunc *cb, void *opaque, BlockCompletionFunc *cb, void *opaque,
BlockJobTxn *txn, Error **errp); JobTxn *txn, Error **errp);
void hmp_drive_add_node(Monitor *mon, const char *optstr); void hmp_drive_add_node(Monitor *mon, const char *optstr);

View File

@ -26,13 +26,13 @@
#ifndef BLOCKJOB_H #ifndef BLOCKJOB_H
#define BLOCKJOB_H #define BLOCKJOB_H
#include "qemu/job.h"
#include "block/block.h" #include "block/block.h"
#include "qemu/ratelimit.h" #include "qemu/ratelimit.h"
#define BLOCK_JOB_SLICE_TIME 100000000ULL /* ns */ #define BLOCK_JOB_SLICE_TIME 100000000ULL /* ns */
typedef struct BlockJobDriver BlockJobDriver; typedef struct BlockJobDriver BlockJobDriver;
typedef struct BlockJobTxn BlockJobTxn;
/** /**
* BlockJob: * BlockJob:
@ -40,141 +40,40 @@ typedef struct BlockJobTxn BlockJobTxn;
* Long-running operation on a BlockDriverState. * Long-running operation on a BlockDriverState.
*/ */
typedef struct BlockJob { typedef struct BlockJob {
/** The job type, including the job vtable. */ /** Data belonging to the generic Job infrastructure */
const BlockJobDriver *driver; Job job;
/** The block device on which the job is operating. */ /** The block device on which the job is operating. */
BlockBackend *blk; BlockBackend *blk;
/**
* The ID of the block job. May be NULL for internal jobs.
*/
char *id;
/**
* The coroutine that executes the job. If not NULL, it is
* reentered when busy is false and the job is cancelled.
*/
Coroutine *co;
/**
* Set to true if the job should cancel itself. The flag must
* always be tested just before toggling the busy flag from false
* to true. After a job has been cancelled, it should only yield
* if #aio_poll will ("sooner or later") reenter the coroutine.
*/
bool cancelled;
/**
* Set to true if the job should abort immediately without waiting
* for data to be in sync.
*/
bool force;
/**
* Counter for pause request. If non-zero, the block job is either paused,
* or if busy == true will pause itself as soon as possible.
*/
int pause_count;
/**
* Set to true if the job is paused by user. Can be unpaused with the
* block-job-resume QMP command.
*/
bool user_paused;
/**
* Set to false by the job while the coroutine has yielded and may be
* re-entered by block_job_enter(). There may still be I/O or event loop
* activity pending. Accessed under block_job_mutex (in blockjob.c).
*/
bool busy;
/**
* Set to true by the job while it is in a quiescent state, where
* no I/O or event loop activity is pending.
*/
bool paused;
/**
* Set to true when the job is ready to be completed.
*/
bool ready;
/**
* Set to true when the job has deferred work to the main loop.
*/
bool deferred_to_main_loop;
/** Element of the list of block jobs */
QLIST_ENTRY(BlockJob) job_list;
/** Status that is published by the query-block-jobs QMP API */ /** Status that is published by the query-block-jobs QMP API */
BlockDeviceIoStatus iostatus; BlockDeviceIoStatus iostatus;
/** Offset that is published by the query-block-jobs QMP API */
int64_t offset;
/** Length that is published by the query-block-jobs QMP API */
int64_t len;
/** Speed that was set with @block_job_set_speed. */ /** Speed that was set with @block_job_set_speed. */
int64_t speed; int64_t speed;
/** Rate limiting data structure for implementing @speed. */ /** Rate limiting data structure for implementing @speed. */
RateLimit limit; RateLimit limit;
/** The completion function that will be called when the job completes. */
BlockCompletionFunc *cb;
/** Block other operations when block job is running */ /** Block other operations when block job is running */
Error *blocker; Error *blocker;
/** Called when a cancelled job is finalised. */
Notifier finalize_cancelled_notifier;
/** Called when a successfully completed job is finalised. */
Notifier finalize_completed_notifier;
/** Called when the job transitions to PENDING */
Notifier pending_notifier;
/** Called when the job transitions to READY */
Notifier ready_notifier;
/** BlockDriverStates that are involved in this block job */ /** BlockDriverStates that are involved in this block job */
GSList *nodes; GSList *nodes;
/** The opaque value that is passed to the completion function. */
void *opaque;
/** Reference count of the block job */
int refcnt;
/** True when job has reported completion by calling block_job_completed. */
bool completed;
/** ret code passed to block_job_completed. */
int ret;
/**
* Timer that is used by @block_job_sleep_ns. Accessed under
* block_job_mutex (in blockjob.c).
*/
QEMUTimer sleep_timer;
/** Current state; See @BlockJobStatus for details. */
BlockJobStatus status;
/** True if this job should automatically finalize itself */
bool auto_finalize;
/** True if this job should automatically dismiss itself */
bool auto_dismiss;
BlockJobTxn *txn;
QLIST_ENTRY(BlockJob) txn_list;
} BlockJob; } BlockJob;
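
The essential change above is that BlockJob no longer duplicates the generic state: it embeds a Job as its first field. To make the navigation between the two views concrete, here is a minimal sketch; the helper names are hypothetical and not part of this series, and container_of() is the usual QEMU idiom for going from an embedded member back to its container.

/* Sketch only: hypothetical helpers, assuming the QEMU tree headers. */
#include "qemu/osdep.h"
#include "block/blockjob.h"

static inline Job *block_job_to_job(BlockJob *bjob)
{
    /* The generic state is an embedded member, so no cast is needed. */
    return &bjob->job;
}

static inline BlockJob *block_job_from_job(Job *job)
{
    /* Only valid when @job is known to belong to a block job. */
    return container_of(job, BlockJob, job);
}
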
typedef enum BlockJobCreateFlags {
/* Default behavior */
BLOCK_JOB_DEFAULT = 0x00,
/* BlockJob is not QMP-created and should not send QMP events */
BLOCK_JOB_INTERNAL = 0x01,
/* BlockJob requires manual finalize step */
BLOCK_JOB_MANUAL_FINALIZE = 0x02,
/* BlockJob requires manual dismiss step */
BLOCK_JOB_MANUAL_DISMISS = 0x04,
} BlockJobCreateFlags;
/** /**
* block_job_next: * block_job_next:
* @job: A block job, or %NULL. * @job: A block job, or %NULL.
@ -230,78 +129,6 @@ void block_job_remove_all_bdrv(BlockJob *job);
*/ */
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
/**
* block_job_start:
* @job: A job that has not yet been started.
*
* Begins execution of a block job.
* Takes ownership of one reference to the job object.
*/
void block_job_start(BlockJob *job);
/**
* block_job_cancel:
* @job: The job to be canceled.
* @force: Quit a job without waiting for data to be in sync.
*
* Asynchronously cancel the specified job.
*/
void block_job_cancel(BlockJob *job, bool force);
/**
* block_job_complete:
* @job: The job to be completed.
* @errp: Error object.
*
* Asynchronously complete the specified job.
*/
void block_job_complete(BlockJob *job, Error **errp);
/**
* block_job_finalize:
* @job: The job to fully commit and finish.
* @errp: Error object.
*
* For jobs that have finished their work and are pending
* awaiting explicit acknowledgement to commit their work,
* This will commit that work.
*
* FIXME: Make the below statement universally true:
* For jobs that support the manual workflow mode, all graph
* changes that occur as a result will occur after this command
* and before a successful reply.
*/
void block_job_finalize(BlockJob *job, Error **errp);
/**
* block_job_dismiss:
* @job: The job to be dismissed.
* @errp: Error object.
*
* Remove a concluded job from the query list.
*/
void block_job_dismiss(BlockJob **job, Error **errp);
/**
* block_job_progress_update:
* @job: The job that has made progress
* @done: How much progress the job made
*
* Updates the progress counter of the job.
*/
void block_job_progress_update(BlockJob *job, uint64_t done);
/**
* block_job_progress_set_remaining:
* @job: The job whose expected progress end value is set
* @remaining: Expected end value of the progress counter of the job
*
* Sets the expected end value of the progress counter of a job so that a
* completion percentage can be calculated when the progress is updated.
*/
void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining);
/** /**
* block_job_query: * block_job_query:
* @job: The job to get information about. * @job: The job to get information about.
@ -310,78 +137,6 @@ void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining);
*/ */
BlockJobInfo *block_job_query(BlockJob *job, Error **errp); BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
/**
* block_job_user_pause:
* @job: The job to be paused.
*
* Asynchronously pause the specified job.
* Do not allow a resume until a matching call to block_job_user_resume.
*/
void block_job_user_pause(BlockJob *job, Error **errp);
/**
* block_job_paused:
* @job: The job to query.
*
* Returns true if the job is user-paused.
*/
bool block_job_user_paused(BlockJob *job);
/**
* block_job_user_resume:
* @job: The job to be resumed.
*
* Resume the specified job.
* Must be paired with a preceding block_job_user_pause.
*/
void block_job_user_resume(BlockJob *job, Error **errp);
/**
* block_job_user_cancel:
* @job: The job to be cancelled.
* @force: Quit a job without waiting for data to be in sync.
*
* Cancels the specified job, but may refuse to do so if the
* operation isn't currently meaningful.
*/
void block_job_user_cancel(BlockJob *job, bool force, Error **errp);
/**
* block_job_cancel_sync:
* @job: The job to be canceled.
*
* Synchronously cancel the job. The completion callback is called
* before the function returns. The job may actually complete
* instead of canceling itself; the circumstances under which this
* happens depend on the kind of job that is active.
*
* Returns the return value from the job if the job actually completed
* during the call, or -ECANCELED if it was canceled.
*/
int block_job_cancel_sync(BlockJob *job);
/**
* block_job_cancel_sync_all:
*
* Synchronously cancels all jobs using block_job_cancel_sync().
*/
void block_job_cancel_sync_all(void);
/**
* block_job_complete_sync:
* @job: The job to be completed.
* @errp: Error object which may be set by block_job_complete(); this is not
* necessarily set on every error, the job return value has to be
* checked as well.
*
* Synchronously complete the job. The completion callback is called before the
* function returns, unless it is NULL (which is permissible when using this
* function).
*
* Returns the return value from the job.
*/
int block_job_complete_sync(BlockJob *job, Error **errp);
/** /**
* block_job_iostatus_reset: * block_job_iostatus_reset:
* @job: The job whose I/O status should be reset. * @job: The job whose I/O status should be reset.
@ -391,59 +146,6 @@ int block_job_complete_sync(BlockJob *job, Error **errp);
*/ */
void block_job_iostatus_reset(BlockJob *job); void block_job_iostatus_reset(BlockJob *job);
/**
* block_job_txn_new:
*
* Allocate and return a new block job transaction. Jobs can be added to the
* transaction using block_job_txn_add_job().
*
* The transaction is automatically freed when the last job completes or is
* cancelled.
*
* All jobs in the transaction either complete successfully or fail/cancel as a
* group. Jobs wait for each other before completing. Cancelling one job
* cancels all jobs in the transaction.
*/
BlockJobTxn *block_job_txn_new(void);
/**
* block_job_ref:
*
* Add a reference to BlockJob refcnt, it will be decreased with
* block_job_unref, and then be freed if it comes to be the last
* reference.
*/
void block_job_ref(BlockJob *job);
/**
* block_job_unref:
*
* Release a reference that was previously acquired with block_job_ref
* or block_job_create. If it's the last reference to the object, it will be
* freed.
*/
void block_job_unref(BlockJob *job);
/**
* block_job_txn_unref:
*
* Release a reference that was previously acquired with block_job_txn_add_job
* or block_job_txn_new. If it's the last reference to the object, it will be
* freed.
*/
void block_job_txn_unref(BlockJobTxn *txn);
/**
* block_job_txn_add_job:
* @txn: The transaction (may be NULL)
* @job: Job to add to the transaction
*
* Add @job to the transaction. The @job must not already be in a transaction.
* The caller must call either block_job_txn_unref() or block_job_completed()
* to release the reference that is automatically grabbed here.
*/
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job);
/** /**
* block_job_is_internal: * block_job_is_internal:
* @job: The job to determine if it is user-visible or not. * @job: The job to determine if it is user-visible or not.

View File

@ -35,72 +35,8 @@
* A class type for block job driver. * A class type for block job driver.
*/ */
struct BlockJobDriver { struct BlockJobDriver {
/** Derived BlockJob struct size */ /** Generic JobDriver callbacks and settings */
size_t instance_size; JobDriver job_driver;
/** String describing the operation, part of query-block-jobs QMP API */
BlockJobType job_type;
/** Mandatory: Entrypoint for the Coroutine. */
CoroutineEntry *start;
/**
* Optional callback for job types whose completion must be triggered
* manually.
*/
void (*complete)(BlockJob *job, Error **errp);
/**
* If the callback is not NULL, prepare will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's completion
* if it is not in a transaction.
*
* This callback will not be invoked if the job has already failed.
* If it fails, abort and then clean will be called.
*/
int (*prepare)(BlockJob *job);
/**
* If the callback is not NULL, it will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's
* completion if it is not in a transaction. Skipped if NULL.
*
* All jobs will complete with a call to either .commit() or .abort() but
* never both.
*/
void (*commit)(BlockJob *job);
/**
* If the callback is not NULL, it will be invoked when any job in the
* same transaction fails; or upon this job's failure (due to error or
* cancellation) if it is not in a transaction. Skipped if NULL.
*
* All jobs will complete with a call to either .commit() or .abort() but
* never both.
*/
void (*abort)(BlockJob *job);
/**
* If the callback is not NULL, it will be invoked after a call to either
* .commit() or .abort(). Regardless of which callback is invoked after
* completion, .clean() will always be called, even if the job does not
* belong to a transaction group.
*/
void (*clean)(BlockJob *job);
/**
* If the callback is not NULL, it will be invoked when the job transitions
* into the paused state. Paused jobs must not perform any asynchronous
* I/O or event loop activity. This callback is used to quiesce jobs.
*/
void coroutine_fn (*pause)(BlockJob *job);
/**
* If the callback is not NULL, it will be invoked when the job transitions
* out of the paused state. Any asynchronous I/O or event loop activity
* should be restarted from this callback.
*/
void coroutine_fn (*resume)(BlockJob *job);
/* /*
* If the callback is not NULL, it will be invoked before the job is * If the callback is not NULL, it will be invoked before the job is
@ -113,6 +49,10 @@ struct BlockJobDriver {
* If the callback is not NULL, it will be invoked when the job has to be * If the callback is not NULL, it will be invoked when the job has to be
* synchronously cancelled or completed; it should drain BlockDriverStates * synchronously cancelled or completed; it should drain BlockDriverStates
* as required to ensure progress. * as required to ensure progress.
*
* Block jobs must use the default implementation for job_driver.drain,
* which will in turn call this callback after performing the generic
* block job handling.
*/ */
void (*drain)(BlockJob *job); void (*drain)(BlockJob *job);
}; };
@ -126,8 +66,7 @@ struct BlockJobDriver {
* @bs: The block device on which the job operates * @bs: The block device on which the job operates
* @perm, @shared_perm: Permissions to request for @bs * @perm, @shared_perm: Permissions to request for @bs
* @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @flags: Creation flags for the Block Job. * @flags: Creation flags for the Block Job. See @JobCreateFlags.
* See @BlockJobCreateFlags
* @cb: Completion function for the job. * @cb: Completion function for the job.
* @opaque: Opaque pointer value passed to @cb. * @opaque: Opaque pointer value passed to @cb.
* @errp: Error object. * @errp: Error object.
@ -142,28 +81,31 @@ struct BlockJobDriver {
* called from a wrapper that is specific to the job type. * called from a wrapper that is specific to the job type.
*/ */
void *block_job_create(const char *job_id, const BlockJobDriver *driver, void *block_job_create(const char *job_id, const BlockJobDriver *driver,
BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm, JobTxn *txn, BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, int64_t speed, int flags, uint64_t shared_perm, int64_t speed, int flags,
BlockCompletionFunc *cb, void *opaque, Error **errp); BlockCompletionFunc *cb, void *opaque, Error **errp);
/** /**
* block_job_sleep_ns: * block_job_free:
* @job: The job that calls the function. * Callback to be used for JobDriver.free in all block jobs. Frees block job
* @ns: How many nanoseconds to stop for. * specific resources in @job.
*
* Put the job to sleep (assuming that it wasn't canceled) for @ns
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
* interrupt the wait.
*/ */
void block_job_sleep_ns(BlockJob *job, int64_t ns); void block_job_free(Job *job);
/** /**
* block_job_yield: * block_job_user_resume:
* @job: The job that calls the function. * Callback to be used for JobDriver.user_resume in all block jobs. Resets the
* * iostatus when the user resumes @job.
* Yield the block job coroutine.
*/ */
void block_job_yield(BlockJob *job); void block_job_user_resume(Job *job);
/**
* block_job_drain:
* Callback to be used for JobDriver.drain in all block jobs. Drains the main
* block node associated with the block jobs and calls BlockJobDriver.drain for
* job-specific actions.
*/
void block_job_drain(Job *job);
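
Taken together, block_job_free(), block_job_user_resume() and block_job_drain() are the glue that every block job driver is expected to plug into the generic JobDriver callbacks. The following is a hedged sketch of what such a driver definition might look like; the example_* names are placeholders, and the real drivers converted in this series (backup, commit, mirror, stream) follow the same shape while adding their own callbacks.

/* Sketch only: placeholder block job driver. */
#include "qemu/osdep.h"
#include "block/blockjob_int.h"

static void coroutine_fn example_run(void *opaque);   /* defined elsewhere */
static void example_drain(BlockJob *job);             /* defined elsewhere */

static const BlockJobDriver example_block_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BlockJob),      /* or a struct embedding it */
        .job_type      = JOB_TYPE_STREAM,       /* any existing JobType */
        .start         = example_run,
        .free          = block_job_free,        /* generic block job teardown */
        .user_resume   = block_job_user_resume, /* resets the iostatus */
        .drain         = block_job_drain,       /* drains blk, then calls... */
    },
    .drain = example_drain,                     /* ...this job-specific hook */
};
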
/** /**
* block_job_ratelimit_get_delay: * block_job_ratelimit_get_delay:
@ -173,57 +115,6 @@ void block_job_yield(BlockJob *job);
*/ */
int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n); int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
/**
* block_job_early_fail:
* @bs: The block device.
*
* The block job could not be started, free it.
*/
void block_job_early_fail(BlockJob *job);
/**
* block_job_completed:
* @job: The job being completed.
* @ret: The status code.
*
* Call the completion function that was registered at creation time, and
* free @job.
*/
void block_job_completed(BlockJob *job, int ret);
/**
* block_job_is_cancelled:
* @job: The job being queried.
*
* Returns whether the job is scheduled for cancellation.
*/
bool block_job_is_cancelled(BlockJob *job);
/**
* block_job_pause_point:
* @job: The job that is ready to pause.
*
* Pause now if block_job_pause() has been called. Block jobs that perform
* lots of I/O must call this between requests so that the job can be paused.
*/
void coroutine_fn block_job_pause_point(BlockJob *job);
/**
* block_job_enter:
* @job: The job to enter.
*
* Continue the specified job by entering the coroutine.
*/
void block_job_enter(BlockJob *job);
/**
* block_job_event_ready:
* @job: The job which is now ready to be completed.
*
* Send a BLOCK_JOB_READY event for the specified job.
*/
void block_job_event_ready(BlockJob *job);
/** /**
* block_job_error_action: * block_job_error_action:
* @job: The job to signal an error for. * @job: The job to signal an error for.
@ -237,23 +128,4 @@ void block_job_event_ready(BlockJob *job);
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
int is_read, int error); int is_read, int error);
typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque);
/**
* block_job_defer_to_main_loop:
* @job: The job
* @fn: The function to run in the main loop
* @opaque: The opaque value that is passed to @fn
*
* This function must be called by the main job coroutine just before it
* returns. @fn is executed in the main loop with the BlockDriverState
* AioContext acquired. Block jobs must call bdrv_unref(), bdrv_close(), and
* anything that uses bdrv_drain_all() in the main loop.
*
* The @job AioContext is held while @fn executes.
*/
void block_job_defer_to_main_loop(BlockJob *job,
BlockJobDeferToMainLoopFn *fn,
void *opaque);
#endif #endif

include/qemu/job.h: 562 lines (new file)
View File

@ -0,0 +1,562 @@
/*
* Declarations for background jobs
*
* Copyright (c) 2011 IBM Corp.
* Copyright (c) 2012, 2018 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef JOB_H
#define JOB_H
#include "qapi/qapi-types-block-core.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "block/aio.h"
typedef struct JobDriver JobDriver;
typedef struct JobTxn JobTxn;
/**
* Long-running operation.
*/
typedef struct Job {
/** The ID of the job. May be NULL for internal jobs. */
char *id;
/** The type of this job. */
const JobDriver *driver;
/** Reference count of the job */
int refcnt;
/** Current state; See @JobStatus for details. */
JobStatus status;
/** AioContext to run the job coroutine in */
AioContext *aio_context;
/**
* The coroutine that executes the job. If not NULL, it is reentered when
* busy is false and the job is cancelled.
*/
Coroutine *co;
/**
* Timer that is used by @job_sleep_ns. Accessed under job_mutex (in
* job.c).
*/
QEMUTimer sleep_timer;
/**
* Counter for pause requests. If non-zero, the job is either paused, or if
* busy == true it will pause itself as soon as possible.
*/
int pause_count;
/**
* Set to false by the job while the coroutine has yielded and may be
* re-entered by job_enter(). There may still be I/O or event loop activity
* pending. Accessed under job_mutex (in job.c).
*/
bool busy;
/**
* Set to true by the job while it is in a quiescent state, where
* no I/O or event loop activity is pending.
*/
bool paused;
/**
* Set to true if the job is paused by user. Can be unpaused with the
* block-job-resume QMP command.
*/
bool user_paused;
/**
* Set to true if the job should cancel itself. The flag must
* always be tested just before toggling the busy flag from false
* to true. After a job has been cancelled, it should only yield
* if #aio_poll will ("sooner or later") reenter the coroutine.
*/
bool cancelled;
/**
* Set to true if the job should abort immediately without waiting
* for data to be in sync.
*/
bool force_cancel;
/** Set to true when the job has deferred work to the main loop. */
bool deferred_to_main_loop;
/** True if this job should automatically finalize itself */
bool auto_finalize;
/** True if this job should automatically dismiss itself */
bool auto_dismiss;
/**
* Current progress. The unit is arbitrary as long as the ratio between
* progress_current and progress_total represents the estimated percentage
* of work already done.
*/
int64_t progress_current;
/** Estimated progress_current value at the completion of the job */
int64_t progress_total;
/** ret code passed to job_completed. */
int ret;
/** The completion function that will be called when the job completes. */
BlockCompletionFunc *cb;
/** The opaque value that is passed to the completion function. */
void *opaque;
/** Notifiers called when a cancelled job is finalised */
NotifierList on_finalize_cancelled;
/** Notifiers called when a successfully completed job is finalised */
NotifierList on_finalize_completed;
/** Notifiers called when the job transitions to PENDING */
NotifierList on_pending;
/** Notifiers called when the job transitions to READY */
NotifierList on_ready;
/** Element of the list of jobs */
QLIST_ENTRY(Job) job_list;
/** Transaction this job is part of */
JobTxn *txn;
/** Element of the list of jobs in a job transaction */
QLIST_ENTRY(Job) txn_list;
} Job;
/**
* Callbacks and other information about a Job driver.
*/
struct JobDriver {
/** Derived Job struct size */
size_t instance_size;
/** Enum describing the operation */
JobType job_type;
/** Mandatory: Entrypoint for the Coroutine. */
CoroutineEntry *start;
/**
* If the callback is not NULL, it will be invoked when the job transitions
* into the paused state. Paused jobs must not perform any asynchronous
* I/O or event loop activity. This callback is used to quiesce jobs.
*/
void coroutine_fn (*pause)(Job *job);
/**
* If the callback is not NULL, it will be invoked when the job transitions
* out of the paused state. Any asynchronous I/O or event loop activity
* should be restarted from this callback.
*/
void coroutine_fn (*resume)(Job *job);
/**
* Called when the job is resumed by the user (i.e. user_paused becomes
* false). .user_resume is called before .resume.
*/
void (*user_resume)(Job *job);
/**
* Optional callback for job types whose completion must be triggered
* manually.
*/
void (*complete)(Job *job, Error **errp);
/*
* If the callback is not NULL, it will be invoked when the job has to be
* synchronously cancelled or completed; it should drain any activities
* as required to ensure progress.
*/
void (*drain)(Job *job);
/**
* If the callback is not NULL, prepare will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's completion
* if it is not in a transaction.
*
* This callback will not be invoked if the job has already failed.
* If it fails, abort and then clean will be called.
*/
int (*prepare)(Job *job);
/**
* If the callback is not NULL, it will be invoked when all the jobs
* belonging to the same transaction complete; or upon this job's
* completion if it is not in a transaction. Skipped if NULL.
*
* All jobs will complete with a call to either .commit() or .abort() but
* never both.
*/
void (*commit)(Job *job);
/**
* If the callback is not NULL, it will be invoked when any job in the
* same transaction fails; or upon this job's failure (due to error or
* cancellation) if it is not in a transaction. Skipped if NULL.
*
* All jobs will complete with a call to either .commit() or .abort() but
* never both.
*/
void (*abort)(Job *job);
/**
* If the callback is not NULL, it will be invoked after a call to either
* .commit() or .abort(). Regardless of which callback is invoked after
* completion, .clean() will always be called, even if the job does not
* belong to a transaction group.
*/
void (*clean)(Job *job);
/** Called when the job is freed */
void (*free)(Job *job);
};
typedef enum JobCreateFlags {
/* Default behavior */
JOB_DEFAULT = 0x00,
/* Job is not QMP-created and should not send QMP events */
JOB_INTERNAL = 0x01,
/* Job requires manual finalize step */
JOB_MANUAL_FINALIZE = 0x02,
/* Job requires manual dismiss step */
JOB_MANUAL_DISMISS = 0x04,
} JobCreateFlags;
/**
* Allocate and return a new job transaction. Jobs can be added to the
* transaction using job_txn_add_job().
*
* The transaction is automatically freed when the last job completes or is
* cancelled.
*
* All jobs in the transaction either complete successfully or fail/cancel as a
* group. Jobs wait for each other before completing. Cancelling one job
* cancels all jobs in the transaction.
*/
JobTxn *job_txn_new(void);
/**
* Release a reference that was previously acquired with job_txn_add_job or
* job_txn_new. If it's the last reference to the object, it will be freed.
*/
void job_txn_unref(JobTxn *txn);
/**
* @txn: The transaction (may be NULL)
* @job: Job to add to the transaction
*
* Add @job to the transaction. The @job must not already be in a transaction.
* The caller must call either job_txn_unref() or job_completed() to release
* the reference that is automatically grabbed here.
*
* If @txn is NULL, the function does nothing.
*/
void job_txn_add_job(JobTxn *txn, Job *job);
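
To make the ownership rules concrete, here is a sketch of the creation-side pattern, modelled on how the blockdev 'transaction' command is expected to use it; job creation itself is only indicated by a comment, since it goes through job_create()/block_job_create() with @txn passed in.

/* Sketch only: error handling omitted. */
#include "qemu/osdep.h"
#include "qemu/job.h"

static void example_build_transaction(void)
{
    JobTxn *txn = job_txn_new();

    /*
     * Create the member jobs here, passing @txn to job_create() or
     * block_job_create(); creating a job with a non-NULL transaction is
     * assumed to add it to @txn (job_txn_add_job()).
     */

    /*
     * Drop the creator's reference; the transaction frees itself once the
     * last member job completes or is cancelled.
     */
    job_txn_unref(txn);
}
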
/**
* Create a new long-running job and return it.
*
* @job_id: The id of the newly-created job, or %NULL for internal jobs
* @driver: The class object for the newly-created job.
* @txn: The transaction this job belongs to, if any. %NULL otherwise.
* @ctx: The AioContext to run the job coroutine in.
* @flags: Creation flags for the job. See @JobCreateFlags.
* @cb: Completion function for the job.
* @opaque: Opaque pointer value passed to @cb.
* @errp: Error object.
*/
void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
AioContext *ctx, int flags, BlockCompletionFunc *cb,
void *opaque, Error **errp);
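
A hedged end-to-end sketch of creating and starting a job with this API: only .instance_size, .job_type and .start are set in the driver, matching the fields documented as mandatory above. All example_* names and the job ID are placeholders; the start coroutine is assumed to receive the Job pointer as its opaque argument, as the block jobs converted in this series do (its body is sketched further below).

/* Sketch only. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/job.h"

static void coroutine_fn example_start(void *opaque);  /* sketched below */

static const JobDriver example_job_driver = {
    .instance_size = sizeof(Job),
    .job_type      = JOB_TYPE_BACKUP,   /* reuse an existing JobType value */
    .start         = example_start,
};

static Job *example_create_and_start(Error **errp)
{
    Job *job = job_create("example0", &example_job_driver, NULL /* txn */,
                          qemu_get_aio_context(), JOB_DEFAULT,
                          NULL /* cb */, NULL /* opaque */, errp);
    if (!job) {
        return NULL;
    }
    job_start(job);     /* takes ownership of one reference to @job */
    return job;
}
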
/**
* Add a reference to Job refcnt, it will be decreased with job_unref, and then
* be freed if it comes to be the last reference.
*/
void job_ref(Job *job);
/**
* Release a reference that was previously acquired with job_ref() or
* job_create(). If it's the last reference to the object, it will be freed.
*/
void job_unref(Job *job);
/**
* @job: The job that has made progress
* @done: How much progress the job made since the last call
*
* Updates the progress counter of the job.
*/
void job_progress_update(Job *job, uint64_t done);
/**
* @job: The job whose expected progress end value is set
* @remaining: Missing progress (on top of the current progress counter value)
* until the new expected end value is reached
*
* Sets the expected end value of the progress counter of a job so that a
* completion percentage can be calculated when the progress is updated.
*/
void job_progress_set_remaining(Job *job, uint64_t remaining);
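
For instance, a copying job that knows how much data is left would set the end value once and then bump the counter as it processes chunks; a small sketch, meant to run inside the job coroutine (the chunk size and the elided copy step are arbitrary):

/* Sketch only. */
#include "qemu/osdep.h"
#include "qemu/job.h"

static void coroutine_fn example_report_progress(Job *job, uint64_t total)
{
    uint64_t done = 0;

    job_progress_set_remaining(job, total);     /* expected end value */
    while (done < total && !job_is_cancelled(job)) {
        uint64_t n = MIN(total - done, 64 * 1024);
        /* ... process @n bytes here ... */
        job_progress_update(job, n);            /* ratio done/total grows */
        done += n;
    }
}
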
/** To be called when a cancelled job is finalised. */
void job_event_cancelled(Job *job);
/** To be called when a successfully completed job is finalised. */
void job_event_completed(Job *job);
/**
* Conditionally enter the job coroutine if the job is ready to run, not
* already busy and fn() returns true. fn() is called while under the job_lock
* critical section.
*/
void job_enter_cond(Job *job, bool(*fn)(Job *job));
/**
* @job: A job that has not yet been started.
*
* Begins execution of a job.
* Takes ownership of one reference to the job object.
*/
void job_start(Job *job);
/**
* @job: The job to enter.
*
* Continue the specified job by entering the coroutine.
*/
void job_enter(Job *job);
/**
* @job: The job that is ready to pause.
*
* Pause now if job_pause() has been called. Jobs that perform lots of I/O
* must call this between requests so that the job can be paused.
*/
void coroutine_fn job_pause_point(Job *job);
/**
* @job: The job that calls the function.
*
* Yield the job coroutine.
*/
void job_yield(Job *job);
/**
* @job: The job that calls the function.
* @ns: How many nanoseconds to stop for.
*
* Put the job to sleep (assuming that it wasn't canceled) for @ns
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
* interrupt the wait.
*/
void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
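
Putting the coroutine-side helpers together, the body of a .start callback typically loops over units of work, checks for cancellation, honours pause requests and throttles itself. A sketch of the example_start() coroutine declared earlier; the work itself is elided, and a real job would finish by deferring to the main loop (see job_defer_to_main_loop() below).

/* Sketch only. */
static void coroutine_fn example_start(void *opaque)
{
    Job *job = opaque;

    while (!job_is_cancelled(job)) {
        /* ... perform one unit of work here; break out when done ... */
        job_pause_point(job);        /* honour job_pause()/job_user_pause() */
        job_sleep_ns(job, 100000);   /* crude self-throttling, 100 us */
    }

    /* Completion is deferred to the main loop; see the sketch below. */
}
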
/** Returns the JobType of a given Job. */
JobType job_type(const Job *job);
/** Returns the enum string for the JobType of a given Job. */
const char *job_type_str(const Job *job);
/** Returns true if the job should not be visible to the management layer. */
bool job_is_internal(Job *job);
/** Returns whether the job is scheduled for cancellation. */
bool job_is_cancelled(Job *job);
/** Returns whether the job is in a completed state. */
bool job_is_completed(Job *job);
/** Returns whether the job is ready to be completed. */
bool job_is_ready(Job *job);
/**
* Request @job to pause at the next pause point. Must be paired with
* job_resume(). If the job is supposed to be resumed by user action, call
* job_user_pause() instead.
*/
void job_pause(Job *job);
/** Resumes a @job paused with job_pause. */
void job_resume(Job *job);
/**
* Asynchronously pause the specified @job.
* Do not allow a resume until a matching call to job_user_resume.
*/
void job_user_pause(Job *job, Error **errp);
/** Returns true if the job is user-paused. */
bool job_user_paused(Job *job);
/**
* Resume the specified @job.
* Must be paired with a preceding job_user_pause.
*/
void job_user_resume(Job *job, Error **errp);
/*
* Drain any activities as required to ensure progress. This can be called in a
* loop to synchronously complete a job.
*/
void job_drain(Job *job);
/**
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
*/
Job *job_next(Job *job);
/**
* Get the job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
*/
Job *job_get(const char *id);
/**
* Check whether the verb @verb can be applied to @job in its current state.
* Returns 0 if the verb can be applied; otherwise errp is set and -EPERM is
* returned.
*/
int job_apply_verb(Job *job, JobVerb verb, Error **errp);
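
As an illustration of the state machine check, a user-facing helper could guard itself like this before touching the job; this is only a sketch, since in this series the job_user_*() functions are expected to perform such checks internally.

/* Sketch only. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/job.h"

static int example_try_pause(Job *job, Error **errp)
{
    int ret = job_apply_verb(job, JOB_VERB_PAUSE, errp);
    if (ret) {
        return ret;       /* -EPERM; errp has already been set */
    }
    job_pause(job);
    return 0;
}
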
/** The @job could not be started, free it. */
void job_early_fail(Job *job);
/** Moves the @job from RUNNING to READY */
void job_transition_to_ready(Job *job);
/**
* @job: The job being completed.
* @ret: The status code.
*
* Marks @job as completed. If @ret is non-zero, the job transaction it is part
* of is aborted. If @ret is zero, the job moves into the WAITING state. If it
* is the last job to complete in its transaction, all jobs in the transaction
* move from WAITING to PENDING.
*/
void job_completed(Job *job, int ret);
/** Asynchronously complete the specified @job. */
void job_complete(Job *job, Error **errp);
/**
* Asynchronously cancel the specified @job. If @force is true, the job should
* be cancelled immediately without waiting for a consistent state.
*/
void job_cancel(Job *job, bool force);
/**
* Cancels the specified job like job_cancel(), but may refuse to do so if the
* operation isn't meaningful in the current state of the job.
*/
void job_user_cancel(Job *job, bool force, Error **errp);
/**
* Synchronously cancel the @job. The completion callback is called
* before the function returns. The job may actually complete
* instead of canceling itself; the circumstances under which this
* happens depend on the kind of job that is active.
*
* Returns the return value from the job if the job actually completed
* during the call, or -ECANCELED if it was canceled.
*/
int job_cancel_sync(Job *job);
/** Synchronously cancels all jobs using job_cancel_sync(). */
void job_cancel_sync_all(void);
/**
* @job: The job to be completed.
* @errp: Error object which may be set by job_complete(); this is not
* necessarily set on every error, the job return value has to be
* checked as well.
*
* Synchronously complete the job. The completion callback is called before the
* function returns, unless it is NULL (which is permissible when using this
* function).
*
* Returns the return value from the job.
*/
int job_complete_sync(Job *job, Error **errp);
/**
* For a @job that has finished its work and is pending awaiting explicit
* acknowledgement to commit its work, this will commit that work.
*
* FIXME: Make the below statement universally true:
* For jobs that support the manual workflow mode, all graph changes that occur
* as a result will occur after this command and before a successful reply.
*/
void job_finalize(Job *job, Error **errp);
/**
* Removes the concluded @job from the query list and resets the passed pointer
* to %NULL. Sets an error if the job is not actually concluded.
*/
void job_dismiss(Job **job, Error **errp);
typedef void JobDeferToMainLoopFn(Job *job, void *opaque);
/**
* @job: The job
* @fn: The function to run in the main loop
* @opaque: The opaque value that is passed to @fn
*
* This function must be called by the main job coroutine just before it
* returns. @fn is executed in the main loop with the job AioContext acquired.
*
* Block jobs must call bdrv_unref(), bdrv_close(), and anything that uses
* bdrv_drain_all() in the main loop.
*
* The @job AioContext is held while @fn executes.
*/
void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
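
A sketch of the finishing pattern the converted block jobs use: the coroutine packs up its result, defers to the main loop, and only there calls job_completed(). The example_* names are placeholders, and passing the return code through the opaque pointer is a sketch-level shortcut (real jobs usually pass a small struct instead).

/* Sketch only. Runs in the main loop with the job's AioContext held. */
static void example_complete_in_main_loop(Job *job, void *opaque)
{
    int ret = (intptr_t)opaque;

    /* Safe place for bdrv_unref(), bdrv_drain_all() users, etc. */
    job_completed(job, ret);
}

/* Called at the very end of the start coroutine: */
static void coroutine_fn example_finish(Job *job, int ret)
{
    job_defer_to_main_loop(job, example_complete_in_main_loop,
                           (void *)(intptr_t)ret);
}
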
/**
* Synchronously finishes the given @job. If @finish is given, it is called to
* trigger completion or cancellation of the job.
*
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
* cancelled before completing, and -errno in other error cases.
*/
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
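
As a concrete use, the synchronous helpers declared above can be expressed in terms of job_finish_sync(); the sketch below is consistent with the declarations in this header but is not necessarily the exact code in job.c.

/* Sketch only. */
static int example_complete_sync(Job *job, Error **errp)
{
    /* job_complete() requests completion, job_finish_sync() then waits. */
    return job_finish_sync(job, job_complete, errp);
}

static void example_cancel_err(Job *job, Error **errp)
{
    job_cancel(job, false);
}

static int example_cancel_sync(Job *job)
{
    return job_finish_sync(job, example_cancel_err, NULL);
}
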
#endif

job-qmp.c: 188 lines (new file)
View File

@ -0,0 +1,188 @@
/*
* QMP interface for background jobs
*
* Copyright (c) 2011 IBM Corp.
* Copyright (c) 2012, 2018 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/job.h"
#include "qapi/qapi-commands-job.h"
#include "qapi/error.h"
#include "trace-root.h"
/* Get a job using its ID and acquire its AioContext */
static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
{
Job *job;
*aio_context = NULL;
job = job_get(id);
if (!job) {
error_setg(errp, "Job not found");
return NULL;
}
*aio_context = job->aio_context;
aio_context_acquire(*aio_context);
return job;
}
void qmp_job_cancel(const char *id, Error **errp)
{
AioContext *aio_context;
Job *job = find_job(id, &aio_context, errp);
if (!job) {
return;
}
trace_qmp_job_cancel(job);
job_user_cancel(job, true, errp);
aio_context_release(aio_context);
}
void qmp_job_pause(const char *id, Error **errp)
{
AioContext *aio_context;
Job *job = find_job(id, &aio_context, errp);
if (!job) {
return;
}
trace_qmp_job_pause(job);
job_user_pause(job, errp);
aio_context_release(aio_context);
}
void qmp_job_resume(const char *id, Error **errp)
{
AioContext *aio_context;
Job *job = find_job(id, &aio_context, errp);
if (!job) {
return;
}
trace_qmp_job_resume(job);
job_user_resume(job, errp);
aio_context_release(aio_context);
}
void qmp_job_complete(const char *id, Error **errp)
{
AioContext *aio_context;
Job *job = find_job(id, &aio_context, errp);
if (!job) {
return;
}
trace_qmp_job_complete(job);
job_complete(job, errp);
aio_context_release(aio_context);
}
void qmp_job_finalize(const char *id, Error **errp)
{
AioContext *aio_context;
Job *job = find_job(id, &aio_context, errp);
if (!job) {
return;
}
trace_qmp_job_finalize(job);
job_finalize(job, errp);
aio_context_release(aio_context);
}
void qmp_job_dismiss(const char *id, Error **errp)
{
AioContext *aio_context;
Job *job = find_job(id, &aio_context, errp);
if (!job) {
return;
}
trace_qmp_job_dismiss(job);
job_dismiss(&job, errp);
aio_context_release(aio_context);
}
static JobInfo *job_query_single(Job *job, Error **errp)
{
JobInfo *info;
const char *errmsg = NULL;
assert(!job_is_internal(job));
if (job->ret < 0) {
errmsg = strerror(-job->ret);
}
info = g_new(JobInfo, 1);
*info = (JobInfo) {
.id = g_strdup(job->id),
.type = job_type(job),
.status = job->status,
.current_progress = job->progress_current,
.total_progress = job->progress_total,
.has_error = !!errmsg,
.error = g_strdup(errmsg),
};
return info;
}
JobInfoList *qmp_query_jobs(Error **errp)
{
JobInfoList *head = NULL, **p_next = &head;
Job *job;
for (job = job_next(NULL); job; job = job_next(job)) {
JobInfoList *elem;
AioContext *aio_context;
if (job_is_internal(job)) {
continue;
}
elem = g_new0(JobInfoList, 1);
aio_context = job->aio_context;
aio_context_acquire(aio_context);
elem->value = job_query_single(job, errp);
aio_context_release(aio_context);
if (!elem->value) {
g_free(elem);
qapi_free_JobInfoList(head);
return NULL;
}
*p_next = elem;
p_next = &elem->next;
}
return head;
}

job.c: 1000 lines (new file; diff suppressed because it is too large)

View File

@ -6,6 +6,7 @@
{ 'include': 'common.json' } { 'include': 'common.json' }
{ 'include': 'crypto.json' } { 'include': 'crypto.json' }
{ 'include': 'job.json' }
{ 'include': 'sockets.json' } { 'include': 'sockets.json' }
## ##
@ -1049,95 +1050,6 @@
{ 'enum': 'MirrorSyncMode', { 'enum': 'MirrorSyncMode',
'data': ['top', 'full', 'none', 'incremental'] } 'data': ['top', 'full', 'none', 'incremental'] }
##
# @BlockJobType:
#
# Type of a block job.
#
# @commit: block commit job type, see "block-commit"
#
# @stream: block stream job type, see "block-stream"
#
# @mirror: drive mirror job type, see "drive-mirror"
#
# @backup: drive backup job type, see "drive-backup"
#
# Since: 1.7
##
{ 'enum': 'BlockJobType',
'data': ['commit', 'stream', 'mirror', 'backup'] }
##
# @BlockJobVerb:
#
# Represents command verbs that can be applied to a blockjob.
#
# @cancel: see @block-job-cancel
#
# @pause: see @block-job-pause
#
# @resume: see @block-job-resume
#
# @set-speed: see @block-job-set-speed
#
# @complete: see @block-job-complete
#
# @dismiss: see @block-job-dismiss
#
# @finalize: see @block-job-finalize
#
# Since: 2.12
##
{ 'enum': 'BlockJobVerb',
'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss',
'finalize' ] }
##
# @BlockJobStatus:
#
# Indicates the present state of a given blockjob in its lifetime.
#
# @undefined: Erroneous, default state. Should not ever be visible.
#
# @created: The job has been created, but not yet started.
#
# @running: The job is currently running.
#
# @paused: The job is running, but paused. The pause may be requested by
# either the QMP user or by internal processes.
#
# @ready: The job is running, but is ready for the user to signal completion.
# This is used for long-running jobs like mirror that are designed to
# run indefinitely.
#
# @standby: The job is ready, but paused. This is nearly identical to @paused.
# The job may return to @ready or otherwise be canceled.
#
# @waiting: The job is waiting for other jobs in the transaction to converge
# to the waiting state. This status will likely not be visible for
# the last job in a transaction.
#
# @pending: The job has finished its work, but has finalization steps that it
# needs to make prior to completing. These changes may require
# manual intervention by the management process if manual was set
# to true. These changes may still fail.
#
# @aborting: The job is in the process of being aborted, and will finish with
# an error. The job will afterwards report that it is @concluded.
# This status may not be visible to the management process.
#
# @concluded: The job has finished all work. If manual was set to true, the job
# will remain in the query list until it is dismissed.
#
# @null: The job is in the process of being dismantled. This state should not
# ever be visible externally.
#
# Since: 2.12
##
{ 'enum': 'BlockJobStatus',
'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
'waiting', 'pending', 'aborting', 'concluded', 'null' ] }
## ##
# @BlockJobInfo: # @BlockJobInfo:
# #
@ -1148,7 +1060,12 @@
# @device: The job identifier. Originally the device name but other # @device: The job identifier. Originally the device name but other
# values are allowed since QEMU 2.7 # values are allowed since QEMU 2.7
# #
# @len: the maximum progress value # @len: Estimated @offset value at the completion of the job. This value can
# arbitrarily change while the job is running, in both directions.
#
# @offset: Progress made until now. The unit is arbitrary and the value can
# only meaningfully be used for the ratio of @offset to @len. The
# value is monotonically increasing.
# #
# @busy: false if the job is known to be in a quiescent state, with # @busy: false if the job is known to be in a quiescent state, with
# no pending I/O. Since 1.3. # no pending I/O. Since 1.3.
@ -1156,8 +1073,6 @@
# @paused: whether the job is paused or, if @busy is true, will # @paused: whether the job is paused or, if @busy is true, will
# pause itself as soon as possible. Since 1.3. # pause itself as soon as possible. Since 1.3.
# #
# @offset: the current progress value
#
# @speed: the rate limit, bytes per second # @speed: the rate limit, bytes per second
# #
# @io-status: the status of the job (since 1.3) # @io-status: the status of the job (since 1.3)
@ -1181,7 +1096,7 @@
'data': {'type': 'str', 'device': 'str', 'len': 'int', 'data': {'type': 'str', 'device': 'str', 'len': 'int',
'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int', 'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int',
'io-status': 'BlockDeviceIoStatus', 'ready': 'bool', 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool',
'status': 'BlockJobStatus', 'status': 'JobStatus',
'auto-finalize': 'bool', 'auto-dismiss': 'bool', 'auto-finalize': 'bool', 'auto-dismiss': 'bool',
'*error': 'str' } } '*error': 'str' } }
@ -2338,8 +2253,7 @@
# #
# This command returns immediately after marking the active background block # This command returns immediately after marking the active background block
# operation for pausing. It is an error to call this command if no # operation for pausing. It is an error to call this command if no
# operation is in progress. Pausing an already paused job has no cumulative # operation is in progress or if the job is already paused.
# effect; a single block-job-resume command will resume the job.
# #
# The operation will pause as soon as possible. No event is emitted when # The operation will pause as soon as possible. No event is emitted when
# the operation is actually paused. Cancelling a paused job automatically # the operation is actually paused. Cancelling a paused job automatically
@ -2363,7 +2277,7 @@
# #
# This command returns immediately after resuming a paused background block # This command returns immediately after resuming a paused background block
# operation. It is an error to call this command if no operation is in # operation. It is an error to call this command if no operation is in
# progress. Resuming an already running job is not an error. # progress or if the job is not paused.
# #
# This command also clears the error status of the job. # This command also clears the error status of the job.
# #
@ -2414,7 +2328,7 @@
# QEMU 2.12+ job lifetime management semantics. # QEMU 2.12+ job lifetime management semantics.
# #
# This command will refuse to operate on any job that has not yet reached # This command will refuse to operate on any job that has not yet reached
# its terminal state, BLOCK_JOB_STATUS_CONCLUDED. For jobs that make use of # its terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of the
# BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need # BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need
# to be used as appropriate. # to be used as appropriate.
# #
@ -4497,7 +4411,7 @@
# #
## ##
{ 'event': 'BLOCK_JOB_COMPLETED', { 'event': 'BLOCK_JOB_COMPLETED',
'data': { 'type' : 'BlockJobType', 'data': { 'type' : 'JobType',
'device': 'str', 'device': 'str',
'len' : 'int', 'len' : 'int',
'offset': 'int', 'offset': 'int',
@ -4533,7 +4447,7 @@
# #
## ##
{ 'event': 'BLOCK_JOB_CANCELLED', { 'event': 'BLOCK_JOB_CANCELLED',
'data': { 'type' : 'BlockJobType', 'data': { 'type' : 'JobType',
'device': 'str', 'device': 'str',
'len' : 'int', 'len' : 'int',
'offset': 'int', 'offset': 'int',
@ -4598,7 +4512,7 @@
# #
## ##
{ 'event': 'BLOCK_JOB_READY', { 'event': 'BLOCK_JOB_READY',
'data': { 'type' : 'BlockJobType', 'data': { 'type' : 'JobType',
'device': 'str', 'device': 'str',
'len' : 'int', 'len' : 'int',
'offset': 'int', 'offset': 'int',
@ -4625,7 +4539,7 @@
# #
## ##
{ 'event': 'BLOCK_JOB_PENDING', { 'event': 'BLOCK_JOB_PENDING',
'data': { 'type' : 'BlockJobType', 'data': { 'type' : 'JobType',
'id' : 'str' } } 'id' : 'str' } }
## ##

qapi/job.json: 253 lines (new file)
View File

@ -0,0 +1,253 @@
# -*- Mode: Python -*-
##
# == Background jobs
##
##
# @JobType:
#
# Type of a background job.
#
# @commit: block commit job type, see "block-commit"
#
# @stream: block stream job type, see "block-stream"
#
# @mirror: drive mirror job type, see "drive-mirror"
#
# @backup: drive backup job type, see "drive-backup"
#
# Since: 1.7
##
{ 'enum': 'JobType',
'data': ['commit', 'stream', 'mirror', 'backup'] }
##
# @JobStatus:
#
# Indicates the present state of a given job in its lifetime.
#
# @undefined: Erroneous, default state. Should not ever be visible.
#
# @created: The job has been created, but not yet started.
#
# @running: The job is currently running.
#
# @paused: The job is running, but paused. The pause may be requested by
# either the QMP user or by internal processes.
#
# @ready: The job is running, but is ready for the user to signal completion.
# This is used for long-running jobs like mirror that are designed to
# run indefinitely.
#
# @standby: The job is ready, but paused. This is nearly identical to @paused.
# The job may return to @ready or otherwise be canceled.
#
# @waiting: The job is waiting for other jobs in the transaction to converge
# to the waiting state. This status will likely not be visible for
# the last job in a transaction.
#
# @pending: The job has finished its work, but has finalization steps that it
# needs to make prior to completing. These changes may require
# manual intervention by the management process if manual was set
# to true. These changes may still fail.
#
# @aborting: The job is in the process of being aborted, and will finish with
# an error. The job will afterwards report that it is @concluded.
# This status may not be visible to the management process.
#
# @concluded: The job has finished all work. If manual was set to true, the job
# will remain in the query list until it is dismissed.
#
# @null: The job is in the process of being dismantled. This state should not
# ever be visible externally.
#
# Since: 2.12
##
{ 'enum': 'JobStatus',
'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
'waiting', 'pending', 'aborting', 'concluded', 'null' ] }
##
# @JobVerb:
#
# Represents command verbs that can be applied to a job.
#
# @cancel: see @block-job-cancel
#
# @pause: see @block-job-pause
#
# @resume: see @block-job-resume
#
# @set-speed: see @block-job-set-speed
#
# @complete: see @block-job-complete
#
# @dismiss: see @block-job-dismiss
#
# @finalize: see @block-job-finalize
#
# Since: 2.12
##
{ 'enum': 'JobVerb',
'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss',
'finalize' ] }
##
# @JOB_STATUS_CHANGE:
#
# Emitted when a job transitions to a different status.
#
# @id: The job identifier
# @status: The new job status
#
# Since: 2.13
##
{ 'event': 'JOB_STATUS_CHANGE',
'data': { 'id': 'str',
'status': 'JobStatus' } }
##
# @job-pause:
#
# Pause an active job.
#
# This command returns immediately after marking the active job for pausing.
# Pausing an already paused job is an error.
#
# The job will pause as soon as possible, which means transitioning into the
# PAUSED state if it was RUNNING, or into STANDBY if it was READY. The
# corresponding JOB_STATUS_CHANGE event will be emitted.
#
# Cancelling a paused job automatically resumes it.
#
# @id: The job identifier.
#
# Since: 2.13
##
{ 'command': 'job-pause', 'data': { 'id': 'str' } }
##
# @job-resume:
#
# Resume a paused job.
#
# This command returns immediately after resuming a paused job. Resuming an
# already running job is an error.
#
# @id: The job identifier.
#
# Since: 2.13
##
{ 'command': 'job-resume', 'data': { 'id': 'str' } }
##
# @job-cancel:
#
# Instruct an active background job to cancel at the next opportunity.
# This command returns immediately after marking the active job for
# cancellation.
#
# The job will cancel as soon as possible and then emit a JOB_STATUS_CHANGE
# event. Usually, the status will change to ABORTING, but it is possible that
# a job successfully completes (e.g. because it was almost done and there was
# no opportunity to cancel earlier than completing the job) and transitions to
# PENDING instead.
#
# @id: The job identifier.
#
# Since: 2.13
##
{ 'command': 'job-cancel', 'data': { 'id': 'str' } }
##
# @job-complete:
#
# Manually trigger completion of an active job in the READY state.
#
# @id: The job identifier.
#
# Since: 2.13
##
{ 'command': 'job-complete', 'data': { 'id': 'str' } }
##
# @job-dismiss:
#
# Deletes a job that is in the CONCLUDED state. This command only needs to be
# run explicitly for jobs that don't have automatic dismiss enabled.
#
# This command will refuse to operate on any job that has not yet reached its
# terminal state, JOB_STATUS_CONCLUDED. For jobs that make use of the JOB_READY
# event, job-cancel or job-complete will still need to be used as appropriate.
#
# @id: The job identifier.
#
# Since: 2.13
##
{ 'command': 'job-dismiss', 'data': { 'id': 'str' } }
##
# @job-finalize:
#
# Instructs all jobs in a transaction (or a single job if it is not part of any
# transaction) to finalize any graph changes and do any necessary cleanup. This
# command requires that all involved jobs are in the PENDING state.
#
# For jobs in a transaction, instructing one job to finalize will force
# ALL jobs in the transaction to finalize, so it is only necessary to instruct
# a single member job to finalize.
#
# @id: The identifier of any job in the transaction, or of a job that is not
# part of any transaction.
#
# Since: 2.13
##
{ 'command': 'job-finalize', 'data': { 'id': 'str' } }
##
# @JobInfo:
#
# Information about a job.
#
# @id: The job identifier
#
# @type: The kind of job that is being performed
#
# @status: Current job state/status
#
# @current-progress: Progress made until now. The unit is arbitrary and the
# value can only meaningfully be used for the ratio of
# @current-progress to @total-progress. The value is
# monotonically increasing.
#
# @total-progress: Estimated @current-progress value at the completion of
# the job. This value can arbitrarily change while the
# job is running, in both directions.
#
# @error: If this field is present, the job failed; if it is
# still missing in the CONCLUDED state, this indicates
# successful completion.
#
# The value is a human-readable error message to describe
# the reason for the job failure. It should not be parsed
# by applications.
#
# Since: 2.13
##
{ 'struct': 'JobInfo',
'data': { 'id': 'str', 'type': 'JobType', 'status': 'JobStatus',
'current-progress': 'int', 'total-progress': 'int',
'*error': 'str' } }
##
# @query-jobs:
#
# Return information about jobs.
#
# Returns: a list with a @JobInfo for each active job
#
# Since: 2.13
##
{ 'command': 'query-jobs', 'returns': ['JobInfo'] }

View File

@ -84,6 +84,7 @@
{ 'include': 'crypto.json' } { 'include': 'crypto.json' }
{ 'include': 'block.json' } { 'include': 'block.json' }
{ 'include': 'char.json' } { 'include': 'char.json' }
{ 'include': 'job.json' }
{ 'include': 'net.json' } { 'include': 'net.json' }
{ 'include': 'rocker.json' } { 'include': 'rocker.json' }
{ 'include': 'tpm.json' } { 'include': 'tpm.json' }

View File

@@ -861,19 +861,23 @@ static void run_block_job(BlockJob *job, Error **errp)
     int ret = 0;
 
     aio_context_acquire(aio_context);
-    block_job_ref(job);
+    job_ref(&job->job);
     do {
+        float progress = 0.0f;
         aio_poll(aio_context, true);
-        qemu_progress_print(job->len ?
-                            ((float)job->offset / job->len * 100.f) : 0.0f, 0);
-    } while (!job->ready && !job->completed);
+        if (job->job.progress_total) {
+            progress = (float)job->job.progress_current /
+                       job->job.progress_total * 100.f;
+        }
+        qemu_progress_print(progress, 0);
+    } while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
 
-    if (!job->completed) {
-        ret = block_job_complete_sync(job, errp);
+    if (!job_is_completed(&job->job)) {
+        ret = job_complete_sync(&job->job, errp);
     } else {
-        ret = job->ret;
+        ret = job->job.ret;
     }
-    block_job_unref(job);
+    job_unref(&job->job);
     aio_context_release(aio_context);
 
     /* publish completion progress only when success */
@@ -1014,7 +1018,7 @@ static int img_commit(int argc, char **argv)
 
     aio_context = bdrv_get_aio_context(bs);
     aio_context_acquire(aio_context);
-    commit_active_start("commit", bs, base_bs, BLOCK_JOB_DEFAULT, 0,
+    commit_active_start("commit", bs, base_bs, JOB_DEFAULT, 0,
                         BLOCKDEV_ON_ERROR_REPORT, NULL, common_block_job_cb,
                         &cbi, false, &local_err);
     aio_context_release(aio_context);


@@ -482,6 +482,12 @@ static const char *socket_activation_validate_opts(const char *device,
     return NULL;
 }
 
+static void qemu_nbd_shutdown(void)
+{
+    job_cancel_sync_all();
+    bdrv_close_all();
+}
+
 int main(int argc, char **argv)
 {
     BlockBackend *blk;
@@ -928,7 +934,7 @@ int main(int argc, char **argv)
         exit(EXIT_FAILURE);
     }
     bdrv_init();
-    atexit(bdrv_close_all);
+    atexit(qemu_nbd_shutdown);
 
     srcpath = argv[optind];
     if (imageOpts) {


@@ -304,8 +304,7 @@ class TestParallelOps(iotests.QMPTestCase):
         result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
         self.assert_qmp(result, 'error/class', 'GenericError')
 
-        event = self.vm.get_qmp_event(wait=True)
-        self.assertEqual(event['event'], 'BLOCK_JOB_READY')
+        event = self.vm.event_wait(name='BLOCK_JOB_READY')
         self.assert_qmp(event, 'data/device', 'commit-drive0')
         self.assert_qmp(event, 'data/type', 'commit')
         self.assert_qmp_absent(event, 'data/error')
@@ -565,6 +564,8 @@ class TestEIO(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -596,6 +597,8 @@
                     self.assert_qmp(event, 'data/offset', self.image_len)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -637,6 +640,8 @@
                     self.assert_qmp(event, 'data/offset', self.image_len)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -663,6 +668,8 @@
                     self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -722,6 +729,8 @@ class TestENOSPC(TestErrors):
                     self.assert_qmp(event, 'data/offset', self.image_len)
                     self.assert_qmp(event, 'data/len', self.image_len)
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -751,7 +760,9 @@ class TestStreamStop(iotests.QMPTestCase):
 
         time.sleep(0.1)
         events = self.vm.get_qmp_events(wait=False)
-        self.assertEqual(events, [], 'unexpected QMP event: %s' % events)
+        for e in events:
+            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
+            self.assert_qmp(e, 'data/id', 'drive0')
 
         self.cancel_and_wait(resume=True)


@@ -162,6 +162,8 @@ class TestSingleDrive(ImageCommitTestCase):
                 elif event['event'] == 'BLOCK_JOB_CANCELLED':
                     self.assert_qmp(event, 'data/device', 'drive0')
                     cancelled = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
                 else:
                     self.fail("Unexpected event %s" % (event['event']))
 


@@ -445,6 +445,8 @@ new_state = "1"
                     self.assert_qmp(event, 'data/device', 'drive0')
                     self.assert_qmp(event, 'data/error', 'Input/output error')
                     completed = True
+                elif event['event'] == 'JOB_STATUS_CHANGE':
+                    self.assert_qmp(event, 'data/id', 'drive0')
 
         self.assert_no_active_block_jobs()
         self.vm.shutdown()
@@ -457,6 +459,10 @@
         self.assert_qmp(result, 'return', {})
 
         event = self.vm.get_qmp_event(wait=True)
+        while event['event'] == 'JOB_STATUS_CHANGE':
+            self.assert_qmp(event, 'data/id', 'drive0')
+            event = self.vm.get_qmp_event(wait=True)
+
         self.assertEquals(event['event'], 'BLOCK_JOB_ERROR')
         self.assert_qmp(event, 'data/device', 'drive0')
         self.assert_qmp(event, 'data/operation', 'read')
@@ -478,6 +484,10 @@
         self.assert_qmp(result, 'return', {})
 
         event = self.vm.get_qmp_event(wait=True)
+        while event['event'] == 'JOB_STATUS_CHANGE':
+            self.assert_qmp(event, 'data/id', 'drive0')
+            event = self.vm.get_qmp_event(wait=True)
+
         self.assertEquals(event['event'], 'BLOCK_JOB_ERROR')
         self.assert_qmp(event, 'data/device', 'drive0')
         self.assert_qmp(event, 'data/operation', 'read')
@@ -608,7 +618,7 @@
                              on_target_error='ignore')
         self.assert_qmp(result, 'return', {})
 
-        event = self.vm.get_qmp_event(wait=True)
+        event = self.vm.event_wait(name='BLOCK_JOB_ERROR')
         self.assertEquals(event['event'], 'BLOCK_JOB_ERROR')
         self.assert_qmp(event, 'data/device', 'drive0')
         self.assert_qmp(event, 'data/operation', 'write')
@@ -784,7 +794,12 @@ class TestGranularity(iotests.QMPTestCase):
                              sync='full', target=target_img,
                              mode='absolute-paths', granularity=8192)
         self.assert_qmp(result, 'return', {})
+
         event = self.vm.get_qmp_event(wait=60.0)
+        while event['event'] == 'JOB_STATUS_CHANGE':
+            self.assert_qmp(event, 'data/id', 'drive0')
+            event = self.vm.get_qmp_event(wait=60.0)
+
         # Failures will manifest as COMPLETED/ERROR.
         self.assert_qmp(event, 'event', 'BLOCK_JOB_READY')
         self.complete_and_wait(drive='drive0', wait_ready=False)
@@ -1015,9 +1030,9 @@ class TestOrphanedSource(iotests.QMPTestCase):
                  'read-only': 'on' }
 
         self.vm = iotests.VM()
-        self.vm.add_blockdev(self.qmp_to_opts(blk0))
-        self.vm.add_blockdev(self.qmp_to_opts(blk1))
-        self.vm.add_blockdev(self.qmp_to_opts(blk2))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk0))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk1))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk2))
         self.vm.launch()
 
     def tearDown(self):


@@ -38,7 +38,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
 . ./common.filter
 
 _supported_fmt qcow2 raw
-_supported_proto file nfs
+_supported_proto file
 _supported_os Linux
 
 function run_qemu_img()


@@ -2,10 +2,17 @@ QA output created by 094
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/source.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 *** done


@@ -72,7 +72,7 @@ _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" "return"
 
 _send_qemu_cmd $h "{ 'execute': 'block-commit',
                      'arguments': { 'device': 'test',
-                                    'top': '"${TEST_IMG}.snp1"' } }" "BLOCK_JOB_COMPLETED"
+                                    'top': '"${TEST_IMG}.snp1"' } }" '"status": "null"'
 
 _cleanup_qemu
 


@@ -11,8 +11,14 @@ virtual size: 5.0M (5242880 bytes)
 === Running QEMU Live Commit Test ===
 
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "test"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "test"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "test", "len": 104857600, "offset": 104857600, "speed": 0, "type": "commit"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "test"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "test"}}
 
 === Base image info after commit and resize ===
 image: TEST_DIR/t.IMGFMT.base


@@ -64,7 +64,7 @@ function run_qemu()
 
     _send_qemu_cmd $QEMU_HANDLE '' "$qmp_event"
     if test "$qmp_event" = BLOCK_JOB_ERROR; then
-        _send_qemu_cmd $QEMU_HANDLE '' "BLOCK_JOB_COMPLETED"
+        _send_qemu_cmd $QEMU_HANDLE '' '"status": "null"'
     fi
     _send_qemu_cmd $QEMU_HANDLE '{"execute":"query-block-jobs"}' "return"
     _send_qemu_cmd $QEMU_HANDLE '{"execute":"quit"}' "return"


@@ -6,23 +6,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -32,23 +44,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 512, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -58,23 +82,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 262144, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -84,23 +120,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -110,23 +158,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -136,23 +196,35 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": 0, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -161,23 +233,35 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@@ -186,23 +270,35 @@ Images are identical.
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
 {"return": {}}
 WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
          Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
          Specify the 'raw' format explicitly to remove the restrictions.
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 {"return": []}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
 read 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
 {"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
 {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
 {"return": {}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
 Warning: Image size mismatch!
 Images are identical.
 
@ -211,23 +307,35 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
{"return": {}} {"return": {}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw. WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted. Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions. Specify the 'raw' format explicitly to remove the restrictions.
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
{"return": []} {"return": []}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
read 65536/65536 bytes at offset 0 read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
Warning: Image size mismatch! Warning: Image size mismatch!
Images are identical. Images are identical.
@ -236,23 +344,35 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
{"return": {}} {"return": {}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw. WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted. Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions. Specify the 'raw' format explicitly to remove the restrictions.
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_ERROR", "data": {"device": "src", "operation": "write", "action": "report"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror", "error": "Operation not permitted"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
{"return": []} {"return": []}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
read 65536/65536 bytes at offset 0 read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
Warning: Image size mismatch! Warning: Image size mismatch!
Images are identical. Images are identical.
@ -261,23 +381,37 @@ Images are identical.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
{"return": {}} {"return": {}}
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw. WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted. Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
Specify the 'raw' format explicitly to remove the restrictions. Specify the 'raw' format explicitly to remove the restrictions.
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
Warning: Image size mismatch! Warning: Image size mismatch!
Images are identical. Images are identical.
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} {"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}}
Warning: Image size mismatch! Warning: Image size mismatch!
Images are identical. Images are identical.
*** done *** done

View File

@ -151,10 +151,17 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
return self.wait_qmp_backup(kwargs['device'], error) return self.wait_qmp_backup(kwargs['device'], error)
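    # The job infrastructure emits a JOB_STATUS_CHANGE event for every state
    # transition; drain those events until the job reaches 'null' so that
    # later event_wait() calls are not confused by leftover events.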
def ignore_job_status_change_events(self):
while True:
e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
if e['data']['status'] == 'null':
break
def wait_qmp_backup(self, device, error='Input/output error'): def wait_qmp_backup(self, device, error='Input/output error'):
event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED", event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
match={'data': {'device': device}}) match={'data': {'device': device}})
self.assertNotEqual(event, None) self.assertNotEqual(event, None)
self.ignore_job_status_change_events()
try: try:
failure = self.dictpath(event, 'data/error') failure = self.dictpath(event, 'data/error')
@ -172,6 +179,7 @@ class TestIncrementalBackupBase(iotests.QMPTestCase):
event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED', event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
match={'data': {'device': device}}) match={'data': {'device': device}})
self.assertNotEqual(event, None) self.assertNotEqual(event, None)
self.ignore_job_status_change_events()
def create_anchor_backup(self, drive=None): def create_anchor_backup(self, drive=None):

View File

@ -3,7 +3,7 @@ QA output created by 126
=== Testing plain files === === Testing plain files ===
Formatting 'TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864 Formatting 'TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864
Formatting 'TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864 Formatting 'file:TEST_DIR/a:b.IMGFMT', fmt=IMGFMT size=67108864
=== Testing relative backing filename resolution === === Testing relative backing filename resolution ===

View File

@ -5,10 +5,17 @@ Formatting 'TEST_DIR/t.IMGFMT.overlay1', fmt=IMGFMT size=65536 backing_file=TEST
wrote 42/42 bytes at offset 0 wrote 42/42 bytes at offset 0
42 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 42 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "mirror"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "mirror"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
*** done *** done

View File

@ -107,7 +107,7 @@ test_blockjob \
'format': '$IMGFMT', 'format': '$IMGFMT',
'sync': 'none'}}" \ 'sync': 'none'}}" \
'return' \ 'return' \
'BLOCK_JOB_CANCELLED' '"status": "null"'
echo echo
echo '=== Testing drive-mirror ===' echo '=== Testing drive-mirror ==='
@ -124,7 +124,7 @@ test_blockjob \
'format': '$IMGFMT', 'format': '$IMGFMT',
'sync': 'none'}}" \ 'sync': 'none'}}" \
'BLOCK_JOB_READY' \ 'BLOCK_JOB_READY' \
'BLOCK_JOB_COMPLETED' '"status": "null"'
echo echo
echo '=== Testing active block-commit ===' echo '=== Testing active block-commit ==='
@ -138,7 +138,7 @@ test_blockjob \
"{'execute': 'block-commit', "{'execute': 'block-commit',
'arguments': {'job-id': 'job0', 'device': 'drv0'}}" \ 'arguments': {'job-id': 'job0', 'device': 'drv0'}}" \
'BLOCK_JOB_READY' \ 'BLOCK_JOB_READY' \
'BLOCK_JOB_COMPLETED' '"status": "null"'
echo echo
echo '=== Testing non-active block-commit ===' echo '=== Testing non-active block-commit ==='
@ -157,7 +157,7 @@ test_blockjob \
'top': '$TEST_DIR/m.$IMGFMT', 'top': '$TEST_DIR/m.$IMGFMT',
'speed': 1}}" \ 'speed': 1}}" \
'return' \ 'return' \
'BLOCK_JOB_CANCELLED' '"status": "null"'
echo echo
echo '=== Testing block-stream ===' echo '=== Testing block-stream ==='
@ -170,8 +170,7 @@ echo
$QEMU_IO -c 'write 0 1M' "$TEST_DIR/b.$IMGFMT" | _filter_qemu_io $QEMU_IO -c 'write 0 1M' "$TEST_DIR/b.$IMGFMT" | _filter_qemu_io
# With some data to stream (and @speed set to 1), block-stream will not complete # With some data to stream (and @speed set to 1), block-stream will not complete
# until we send the block-job-cancel command. Therefore, no event other than # until we send the block-job-cancel command.
# BLOCK_JOB_CANCELLED will be emitted.
test_blockjob \ test_blockjob \
"{'execute': 'block-stream', "{'execute': 'block-stream',
@ -179,7 +178,7 @@ test_blockjob \
'device': 'drv0', 'device': 'drv0',
'speed': 1}}" \ 'speed': 1}}" \
'return' \ 'return' \
'BLOCK_JOB_CANCELLED' '"status": "null"'
_cleanup_qemu _cleanup_qemu

View File

@ -8,31 +8,50 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m.
{"return": {}} {"return": {}}
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"return": {}} {"return": {}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}} {"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 0, "speed": 0, "type": "backup"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 0, "speed": 0, "type": "backup"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{"return": {}} {"return": {}}
=== Testing drive-mirror === === Testing drive-mirror ===
{"return": {}} {"return": {}}
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
{"return": {}} {"return": {}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}} {"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{"return": {}} {"return": {}}
=== Testing active block-commit === === Testing active block-commit ===
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{"return": {}} {"return": {}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}} {"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{"return": {}} {"return": {}}
=== Testing non-active block-commit === === Testing non-active block-commit ===
@ -40,10 +59,15 @@ Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
wrote 1048576/1048576 bytes at offset 0 wrote 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"return": {}} {"return": {}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}} {"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "commit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "commit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{"return": {}} {"return": {}}
=== Testing block-stream === === Testing block-stream ===
@ -51,9 +75,14 @@ wrote 1048576/1048576 bytes at offset 0
wrote 1048576/1048576 bytes at offset 0 wrote 1048576/1048576 bytes at offset 0
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{"return": {}} {"return": {}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}} {"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "stream"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "stream"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
{"return": {}} {"return": {}}
*** done *** done

View File

@ -93,7 +93,7 @@ _send_qemu_cmd $h "{ 'execute': 'block-job-complete',
'arguments': { 'arguments': {
'device': 'virtio0' 'device': 'virtio0'
} }
}" "COMPLETED" }" '"status": "null"'
echo echo
echo === Performing Live Snapshot 2 === echo === Performing Live Snapshot 2 ===

View File

@ -12,10 +12,17 @@ Formatting 'TEST_DIR/tmp.qcow2', fmt=qcow2 size=536870912 backing_file=TEST_DIR/
=== Performing block-commit on active layer === === Performing block-commit on active layer ===
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{"return": {}} {"return": {}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "virtio0"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "virtio0"}}
=== Performing Live Snapshot 2 === === Performing Live Snapshot 2 ===

View File

@ -63,7 +63,7 @@ class BaseClass(iotests.QMPTestCase):
'driver': iotests.imgfmt, 'driver': iotests.imgfmt,
'file': {'driver': 'file', 'file': {'driver': 'file',
'filename': source_img}} 'filename': source_img}}
self.vm.add_blockdev(self.qmp_to_opts(blockdev)) self.vm.add_blockdev(self.vm.qmp_to_opts(blockdev))
self.vm.add_device('virtio-blk,id=qdev0,drive=source') self.vm.add_device('virtio-blk,id=qdev0,drive=source')
self.vm.launch() self.vm.launch()

View File

@ -119,7 +119,7 @@ _send_qemu_cmd $QEMU_HANDLE \
_send_qemu_cmd $QEMU_HANDLE \ _send_qemu_cmd $QEMU_HANDLE \
'' \ '' \
'BLOCK_JOB_COMPLETED' '"status": "null"'
# Remove the source images # Remove the source images
rm -f "$TEST_IMG{,.backing,.overlay}" rm -f "$TEST_IMG{,.backing,.overlay}"

View File

@ -12,13 +12,20 @@ wrote 131072/131072 bytes at offset 131072
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""} {"return": ""}
Formatting 'TEST_DIR/t.IMGFMT.target.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.target Formatting 'TEST_DIR/t.IMGFMT.target.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.target
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "source"}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "source", "len": 131072, "offset": 131072, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "source", "len": 131072, "offset": 131072, "speed": 0, "type": "mirror"}}
wrote 65536/65536 bytes at offset 196608 wrote 65536/65536 bytes at offset 196608
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""} {"return": ""}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "source", "len": 196608, "offset": 196608, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "source", "len": 196608, "offset": 196608, "speed": 0, "type": "mirror"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "source"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "source"}}
read 65536/65536 bytes at offset 0 read 65536/65536 bytes at offset 0
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)

View File

@ -27,8 +27,6 @@ echo "QA output created by $seq"
here=`pwd` here=`pwd`
status=1 # failure is the default! status=1 # failure is the default!
MIG_SOCKET="${TEST_DIR}/migrate"
_cleanup() _cleanup()
{ {
rm -f "${TEST_IMG}.mid" rm -f "${TEST_IMG}.mid"
@ -118,8 +116,10 @@ _send_qemu_cmd $h \
# If we don't sleep here 'quit' command races with disk I/O # If we don't sleep here 'quit' command races with disk I/O
sleep 0.5 sleep 0.5
# Ignore the JOB_STATUS_CHANGE events while shutting down the VM. Depending on
# the timing, jobs may or may not transition through a paused state.
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return" _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
echo echo
echo === Start active commit job and exit qemu === echo === Start active commit job and exit qemu ===
@ -141,7 +141,7 @@ _send_qemu_cmd $h \
sleep 0.5 sleep 0.5
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return" _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
echo echo
echo === Start mirror job and exit qemu === echo === Start mirror job and exit qemu ===
@ -166,7 +166,7 @@ _send_qemu_cmd $h \
sleep 0.5 sleep 0.5
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return" _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
echo echo
echo === Start backup job and exit qemu === echo === Start backup job and exit qemu ===
@ -190,7 +190,7 @@ _send_qemu_cmd $h \
sleep 0.5 sleep 0.5
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return" _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
echo echo
echo === Start streaming job and exit qemu === echo === Start streaming job and exit qemu ===
@ -211,7 +211,7 @@ _send_qemu_cmd $h \
sleep 0.5 sleep 0.5
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return" _send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu wait=1 _cleanup_qemu | grep -v 'JOB_STATUS_CHANGE'
_check_test_img _check_test_img

View File

@ -17,6 +17,8 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.q
=== Start commit job and exit qemu === === Start commit job and exit qemu ===
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}} {"return": {}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@ -25,6 +27,8 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.q
=== Start active commit job and exit qemu === === Start active commit job and exit qemu ===
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}} {"return": {}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@ -34,6 +38,8 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.q
{"return": {}} {"return": {}}
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16 Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}} {"return": {}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@ -43,6 +49,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 l
{"return": {}} {"return": {}}
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16 Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}} {"return": {}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
@ -51,6 +59,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 l
=== Start streaming job and exit qemu === === Start streaming job and exit qemu ===
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
{"return": {}} {"return": {}}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}

View File

@ -27,8 +27,6 @@ echo "QA output created by $seq"
here=`pwd` here=`pwd`
status=1 # failure is the default! status=1 # failure is the default!
MIG_SOCKET="${TEST_DIR}/migrate"
_cleanup() _cleanup()
{ {
rm -f "${TEST_IMG}.mid" rm -f "${TEST_IMG}.mid"
@ -83,7 +81,7 @@ _send_qemu_cmd $h \
'device': 'top', 'device': 'top',
'base':'$TEST_IMG.base', 'base':'$TEST_IMG.base',
'top': '$TEST_IMG.mid' } }" \ 'top': '$TEST_IMG.mid' } }" \
"BLOCK_JOB_COMPLETED" '"status": "null"'
_send_qemu_cmd $h "" "^}" _send_qemu_cmd $h "" "^}"
echo echo
@ -131,7 +129,7 @@ _send_qemu_cmd $h \
'device': 'top', 'device': 'top',
'base':'$TEST_IMG.base', 'base':'$TEST_IMG.base',
'top': '$TEST_IMG.mid' } }" \ 'top': '$TEST_IMG.mid' } }" \
"BLOCK_JOB_COMPLETED" '"status": "null"'
_send_qemu_cmd $h "" "^}" _send_qemu_cmd $h "" "^}"
echo echo

View File

@ -15,10 +15,54 @@ wrote 65536/65536 bytes at offset 1048576
=== Perform commit job === === Perform commit job ===
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "created",
"id": "commit0"
}
}
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "running",
"id": "commit0"
}
}
{ {
"return": { "return": {
} }
} }
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "waiting",
"id": "commit0"
}
}
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "pending",
"id": "commit0"
}
}
{ {
"timestamp": { "timestamp": {
"seconds": TIMESTAMP, "seconds": TIMESTAMP,
@ -33,6 +77,28 @@ wrote 65536/65536 bytes at offset 1048576
"type": "commit" "type": "commit"
} }
} }
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "concluded",
"id": "commit0"
}
}
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "null",
"id": "commit0"
}
}
=== Check that both top and top2 point to base now === === Check that both top and top2 point to base now ===
@ -355,10 +421,54 @@ wrote 65536/65536 bytes at offset 1048576
=== Perform commit job === === Perform commit job ===
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "created",
"id": "commit0"
}
}
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "running",
"id": "commit0"
}
}
{ {
"return": { "return": {
} }
} }
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "waiting",
"id": "commit0"
}
}
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "pending",
"id": "commit0"
}
}
{ {
"timestamp": { "timestamp": {
"seconds": TIMESTAMP, "seconds": TIMESTAMP,
@ -373,6 +483,28 @@ wrote 65536/65536 bytes at offset 1048576
"type": "commit" "type": "commit"
} }
} }
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "concluded",
"id": "commit0"
}
}
{
"timestamp": {
"seconds": TIMESTAMP,
"microseconds": TIMESTAMP
},
"event": "JOB_STATUS_CHANGE",
"data": {
"status": "null",
"id": "commit0"
}
}
=== Check that both top and top2 point to base now === === Check that both top and top2 point to base now ===

tests/qemu-iotests/219 Executable file (209 lines added)
View File

@ -0,0 +1,209 @@
#!/usr/bin/env python
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
#
# Check using the job-* QMP commands with block jobs
import iotests
iotests.verify_image_format(supported_fmts=['qcow2'])
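# Poll query-jobs until the given job reports 'paused' or 'standby'; the
# surrounding Timeout makes a job that never pauses fail the test quickly.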
def pause_wait(vm, job_id):
with iotests.Timeout(3, "Timeout waiting for job to pause"):
while True:
result = vm.qmp('query-jobs')
for job in result['return']:
if job['id'] == job_id and job['status'] in ['paused', 'standby']:
return job
# Test that block-job-pause/resume and job-pause/resume can be mixed
def test_pause_resume(vm):
for pause_cmd, pause_arg in [('block-job-pause', 'device'),
('job-pause', 'id')]:
for resume_cmd, resume_arg in [('block-job-resume', 'device'),
('job-resume', 'id')]:
iotests.log('=== Testing %s/%s ===' % (pause_cmd, resume_cmd))
iotests.log(vm.qmp(pause_cmd, **{pause_arg: 'job0'}))
pause_wait(vm, 'job0')
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(vm.qmp('query-jobs'))
iotests.log(vm.qmp(resume_cmd, **{resume_arg: 'job0'}))
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(vm.qmp('query-jobs'))
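# Run a block job through its whole life cycle, issuing the job-* and
# block-job-* control verbs in each state (RUNNING, READY if supported,
# PENDING unless auto-finalize, CONCLUDED unless auto-dismiss) and logging
# which of them are accepted.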
def test_job_lifecycle(vm, job, job_args, has_ready=False):
iotests.log('')
iotests.log('')
iotests.log('Starting block job: %s (auto-finalize: %s; auto-dismiss: %s)' %
(job,
job_args.get('auto-finalize', True),
job_args.get('auto-dismiss', True)))
iotests.log(vm.qmp(job, job_id='job0', **job_args))
# Depending on the storage, the first request may or may not have completed
# yet, so filter out the progress. Later query-job calls don't need the
# filtering because the progress is made deterministic by the block job
# speed
result = vm.qmp('query-jobs')
for j in result['return']:
del j['current-progress']
iotests.log(result)
# undefined -> created -> running
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
# RUNNING state:
# pause/resume should work, complete/finalize/dismiss should error out
iotests.log('')
iotests.log('Pause/resume in RUNNING')
test_pause_resume(vm)
iotests.log(vm.qmp('job-complete', id='job0'))
iotests.log(vm.qmp('job-finalize', id='job0'))
iotests.log(vm.qmp('job-dismiss', id='job0'))
iotests.log(vm.qmp('block-job-complete', device='job0'))
iotests.log(vm.qmp('block-job-finalize', id='job0'))
iotests.log(vm.qmp('block-job-dismiss', id='job0'))
# Let the job complete (or transition to READY if it supports that)
iotests.log(vm.qmp('block-job-set-speed', device='job0', speed=0))
if has_ready:
iotests.log('')
iotests.log('Waiting for READY state...')
vm.event_wait('BLOCK_JOB_READY')
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(vm.qmp('query-jobs'))
# READY state:
# pause/resume/complete should work, finalize/dismiss should error out
iotests.log('')
iotests.log('Pause/resume in READY')
test_pause_resume(vm)
iotests.log(vm.qmp('job-finalize', id='job0'))
iotests.log(vm.qmp('job-dismiss', id='job0'))
iotests.log(vm.qmp('block-job-finalize', id='job0'))
iotests.log(vm.qmp('block-job-dismiss', id='job0'))
# Transition to WAITING
iotests.log(vm.qmp('job-complete', id='job0'))
# Move to WAITING and PENDING state
iotests.log('')
iotests.log('Waiting for PENDING state...')
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
if not job_args.get('auto-finalize', True):
# PENDING state:
# finalize should work, pause/complete/dismiss should error out
iotests.log(vm.qmp('query-jobs'))
iotests.log(vm.qmp('job-pause', id='job0'))
iotests.log(vm.qmp('job-complete', id='job0'))
iotests.log(vm.qmp('job-dismiss', id='job0'))
iotests.log(vm.qmp('block-job-pause', device='job0'))
iotests.log(vm.qmp('block-job-complete', device='job0'))
iotests.log(vm.qmp('block-job-dismiss', id='job0'))
# Transition to CONCLUDED
iotests.log(vm.qmp('job-finalize', id='job0'))
# Move to CONCLUDED state
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
if not job_args.get('auto-dismiss', True):
# CONCLUDED state:
# dismiss should work, pause/complete/finalize should error out
iotests.log(vm.qmp('query-jobs'))
iotests.log(vm.qmp('job-pause', id='job0'))
iotests.log(vm.qmp('job-complete', id='job0'))
iotests.log(vm.qmp('job-finalize', id='job0'))
iotests.log(vm.qmp('block-job-pause', device='job0'))
iotests.log(vm.qmp('block-job-complete', device='job0'))
iotests.log(vm.qmp('block-job-finalize', id='job0'))
# Transition to NULL
iotests.log(vm.qmp('job-dismiss', id='job0'))
# Move to NULL state
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(vm.qmp('query-jobs'))
with iotests.FilePath('disk.img') as disk_path, \
iotests.FilePath('copy.img') as copy_path, \
iotests.VM() as vm:
img_size = '4M'
iotests.qemu_img_create('-f', iotests.imgfmt, disk_path, img_size)
iotests.qemu_io('-c', 'write 0 %s' % (img_size),
'-f', iotests.imgfmt, disk_path)
iotests.log('Launching VM...')
vm.add_blockdev(vm.qmp_to_opts({
'driver': iotests.imgfmt,
'node-name': 'drive0-node',
'file': {
'driver': 'file',
'filename': disk_path,
},
}))
vm.launch()
# In order to keep things deterministic (especially the progress reported by
# query-jobs, and with it automatic state transitions like job completion),
# while still getting pause points often enough to avoid making this test
# very slow, it's important to have the right ratio between speed and
# buf_size.
#
# For backup, buf_size is hard-coded to the source image cluster size (64k),
# so we'll pick the same for mirror. The slice time, i.e. the granularity
# of the rate limiting is 100ms. With a speed of 256k per second, we can
# get four pause points per second. This gives us 250ms per iteration,
# which should be enough to stay deterministic.
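# Worked out: at 256 KiB/s, one 64 KiB buffer takes 65536 / 262144 = 0.25 s,
# i.e. the job hits a rate-limit pause point roughly every 250 ms.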
test_job_lifecycle(vm, 'drive-mirror', has_ready=True, job_args={
'device': 'drive0-node',
'target': copy_path,
'sync': 'full',
'speed': 262144,
'buf_size': 65536,
})
for auto_finalize in [True, False]:
for auto_dismiss in [True, False]:
test_job_lifecycle(vm, 'drive-backup', job_args={
'device': 'drive0-node',
'target': copy_path,
'sync': 'full',
'speed': 262144,
'auto-finalize': auto_finalize,
'auto-dismiss': auto_dismiss,
})
vm.shutdown()

tests/qemu-iotests/219.out Normal file (327 lines added)
View File

@ -0,0 +1,327 @@
Launching VM...
Starting block job: drive-mirror (auto-finalize: True; auto-dismiss: True)
{u'return': {}}
{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
Pause/resume in RUNNING
=== Testing block-job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
=== Testing block-job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
=== Testing job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
=== Testing job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'return': {}}
Waiting for READY state...
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
Pause/resume in READY
=== Testing block-job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
=== Testing block-job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
=== Testing job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
=== Testing job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'standby', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'standby', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'ready', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'ready', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'mirror'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'ready' cannot accept command verb 'dismiss'"}}
{u'return': {}}
Waiting for PENDING state...
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': []}
Starting block job: drive-backup (auto-finalize: True; auto-dismiss: True)
{u'return': {}}
{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
Pause/resume in RUNNING
=== Testing block-job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing block-job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'return': {}}
Waiting for PENDING state...
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': []}
Starting block job: drive-backup (auto-finalize: True; auto-dismiss: False)
{u'return': {}}
{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
Pause/resume in RUNNING
=== Testing block-job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing block-job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'return': {}}
Waiting for PENDING state...
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'concluded', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': []}
Starting block job: drive-backup (auto-finalize: False; auto-dismiss: True)
{u'return': {}}
{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
Pause/resume in RUNNING
=== Testing block-job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing block-job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'return': {}}
Waiting for PENDING state...
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'pending', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': []}
Starting block job: drive-backup (auto-finalize: False; auto-dismiss: False)
{u'return': {}}
{u'return': [{u'status': u'running', u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'created', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
Pause/resume in RUNNING
=== Testing block-job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 65536, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing block-job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 131072, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/block-job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 196608, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
=== Testing job-pause/job-resume ===
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'paused', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'paused', u'current-progress': 262144, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'running', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'running', u'current-progress': 327680, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'running' cannot accept command verb 'dismiss'"}}
{u'return': {}}
Waiting for PENDING state...
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'waiting', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'pending', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'pending', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'pending' cannot accept command verb 'dismiss'"}}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'concluded', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': [{u'status': u'concluded', u'current-progress': 4194304, u'total-progress': 4194304, u'id': u'job0', u'type': u'backup'}]}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'pause'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'complete'"}}
{u'error': {u'class': u'GenericError', u'desc': u"Job 'job0' in state 'concluded' cannot accept command verb 'finalize'"}}
{u'return': {}}
{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'null', u'id': u'job0'}, u'event': u'JOB_STATUS_CHANGE'}
{u'return': []}
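For orientation, the transcript above can be reproduced with the job-* lifecycle commands introduced by this series. Below is a minimal sketch (not taken from the test) of finishing a job by hand; it assumes an iotests.VM-like object with a qmp() method and the illustrative job id 'job0' used above.

# Sketch only: drive a job to removal using the new QMP commands
# exercised in the output above.
def finish_job_manually(vm, job_id='job0', has_ready=False):
    if has_ready:
        # Jobs with a READY phase (e.g. mirror) are completed explicitly
        vm.qmp('job-complete', id=job_id)
    # With auto-finalize=false the job stops in PENDING until finalized
    vm.qmp('job-finalize', id=job_id)
    # With auto-dismiss=false the CONCLUDED job stays visible until dismissed
    vm.qmp('job-dismiss', id=job_id)
    # After dismissal the job no longer appears in query-jobs
    return vm.qmp('query-jobs')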


@ -119,7 +119,8 @@ _filter_actual_image_size()
# replace driver-specific options in the "Formatting..." line # replace driver-specific options in the "Formatting..." line
_filter_img_create() _filter_img_create()
{ {
sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \ sed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \
-e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
-e "s#$TEST_DIR#TEST_DIR#g" \ -e "s#$TEST_DIR#TEST_DIR#g" \
-e "s#$IMGFMT#IMGFMT#g" \ -e "s#$IMGFMT#IMGFMT#g" \
-e 's#nbd:127.0.0.1:10810#TEST_DIR/t.IMGFMT#g' \ -e 's#nbd:127.0.0.1:10810#TEST_DIR/t.IMGFMT#g' \
@ -154,7 +155,8 @@ _filter_img_info()
discard=0 discard=0
regex_json_spec_start='^ *"format-specific": \{' regex_json_spec_start='^ *"format-specific": \{'
sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \ sed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \
-e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
-e "s#$TEST_DIR#TEST_DIR#g" \ -e "s#$TEST_DIR#TEST_DIR#g" \
-e "s#$IMGFMT#IMGFMT#g" \ -e "s#$IMGFMT#IMGFMT#g" \
-e 's#nbd://127.0.0.1:10810$#TEST_DIR/t.IMGFMT#g' \ -e 's#nbd://127.0.0.1:10810$#TEST_DIR/t.IMGFMT#g' \
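The new REMOTE_TEST_DIR substitution has to run before the plain TEST_DIR one; otherwise the nfs:// prefix of the remote URL would survive filtering. A rough Python illustration of that ordering (the paths and the 'Formatting...' line are made up for the example; the real filtering is the sed pipeline above):

# Illustration only of why the substitution order matters.
TEST_DIR = '/tmp/qemu-iotests'
REMOTE_TEST_DIR = 'nfs://127.0.0.1' + TEST_DIR

def filter_img_create(line):
    line = line.replace(REMOTE_TEST_DIR, 'TEST_DIR')  # full remote URL first
    line = line.replace(TEST_DIR, 'TEST_DIR')         # then the local path
    return line.replace('qcow2', 'IMGFMT')

print(filter_img_create(
    "Formatting 'nfs://127.0.0.1/tmp/qemu-iotests/t.qcow2', fmt=qcow2"))
# Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT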


@ -147,8 +147,9 @@ else
TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
TEST_IMG="ssh://127.0.0.1$TEST_IMG_FILE" TEST_IMG="ssh://127.0.0.1$TEST_IMG_FILE"
elif [ "$IMGPROTO" = "nfs" ]; then elif [ "$IMGPROTO" = "nfs" ]; then
TEST_DIR="nfs://127.0.0.1/$TEST_DIR" TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
TEST_IMG=$TEST_DIR/t.$IMGFMT REMOTE_TEST_DIR="nfs://127.0.0.1$TEST_DIR"
TEST_IMG="nfs://127.0.0.1$TEST_IMG_FILE"
elif [ "$IMGPROTO" = "vxhs" ]; then elif [ "$IMGPROTO" = "vxhs" ]; then
TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
TEST_IMG="vxhs://127.0.0.1:9999/t.$IMGFMT" TEST_IMG="vxhs://127.0.0.1:9999/t.$IMGFMT"
@ -173,6 +174,10 @@ if [ ! -d "$TEST_DIR" ]; then
exit 1 exit 1
fi fi
if [ -z "$REMOTE_TEST_DIR" ]; then
REMOTE_TEST_DIR="$TEST_DIR"
fi
if [ ! -d "$SAMPLE_IMG_DIR" ]; then if [ ! -d "$SAMPLE_IMG_DIR" ]; then
echo "common.config: Error: \$SAMPLE_IMG_DIR ($SAMPLE_IMG_DIR) is not a directory" echo "common.config: Error: \$SAMPLE_IMG_DIR ($SAMPLE_IMG_DIR) is not a directory"
exit 1 exit 1
@ -333,7 +338,8 @@ _img_info()
discard=0 discard=0
regex_json_spec_start='^ *"format-specific": \{' regex_json_spec_start='^ *"format-specific": \{'
$QEMU_IMG info $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" 2>&1 | \ $QEMU_IMG info $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" 2>&1 | \
sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \ sed -e "s#$REMOTE_TEST_DIR#TEST_DIR#g" \
-e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
-e "s#$TEST_DIR#TEST_DIR#g" \ -e "s#$TEST_DIR#TEST_DIR#g" \
-e "s#$IMGFMT#IMGFMT#g" \ -e "s#$IMGFMT#IMGFMT#g" \
-e "/^disk size:/ D" \ -e "/^disk size:/ D" \


@ -97,7 +97,7 @@
088 rw auto quick 088 rw auto quick
089 rw auto quick 089 rw auto quick
090 rw auto quick 090 rw auto quick
091 rw auto 091 rw auto migration
092 rw auto quick 092 rw auto quick
093 auto 093 auto
094 rw auto quick 094 rw auto quick
@ -169,7 +169,7 @@
162 auto quick 162 auto quick
163 rw auto 163 rw auto
165 rw auto quick 165 rw auto quick
169 rw auto quick 169 rw auto quick migration
170 rw auto quick 170 rw auto quick
171 rw auto quick 171 rw auto quick
172 auto 172 auto
@ -194,14 +194,14 @@
192 rw auto quick 192 rw auto quick
194 rw auto migration quick 194 rw auto migration quick
195 rw auto quick 195 rw auto quick
196 rw auto quick 196 rw auto quick migration
197 rw auto quick 197 rw auto quick
198 rw auto 198 rw auto
199 rw auto 199 rw auto migration
200 rw auto 200 rw auto
201 rw auto migration 201 rw auto migration
202 rw auto quick 202 rw auto quick
203 rw auto 203 rw auto migration
204 rw auto quick 204 rw auto quick
205 rw auto quick 205 rw auto quick
206 rw auto 206 rw auto
@ -216,3 +216,4 @@
215 rw auto quick 215 rw auto quick
216 rw auto quick 216 rw auto quick
218 rw auto quick 218 rw auto quick
219 rw auto


@ -363,6 +363,27 @@ class VM(qtest.QEMUQtestMachine):
return self.qmp('human-monitor-command', return self.qmp('human-monitor-command',
command_line='qemu-io %s "%s"' % (drive, cmd)) command_line='qemu-io %s "%s"' % (drive, cmd))
def flatten_qmp_object(self, obj, output=None, basestr=''):
if output is None:
output = dict()
if isinstance(obj, list):
for i in range(len(obj)):
self.flatten_qmp_object(obj[i], output, basestr + str(i) + '.')
elif isinstance(obj, dict):
for key in obj:
self.flatten_qmp_object(obj[key], output, basestr + key + '.')
else:
output[basestr[:-1]] = obj # Strip trailing '.'
return output
def qmp_to_opts(self, obj):
obj = self.flatten_qmp_object(obj)
output_list = list()
for key in obj:
output_list += [key + '=' + obj[key]]
return ','.join(output_list)
index_re = re.compile(r'([^\[]+)\[([^\]]+)\]') index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
@ -390,26 +411,6 @@ class QMPTestCase(unittest.TestCase):
self.fail('invalid index "%s" in path "%s" in "%s"' % (idx, path, str(d))) self.fail('invalid index "%s" in path "%s" in "%s"' % (idx, path, str(d)))
return d return d
def flatten_qmp_object(self, obj, output=None, basestr=''):
if output is None:
output = dict()
if isinstance(obj, list):
for i in range(len(obj)):
self.flatten_qmp_object(obj[i], output, basestr + str(i) + '.')
elif isinstance(obj, dict):
for key in obj:
self.flatten_qmp_object(obj[key], output, basestr + key + '.')
else:
output[basestr[:-1]] = obj # Strip trailing '.'
return output
def qmp_to_opts(self, obj):
obj = self.flatten_qmp_object(obj)
output_list = list()
for key in obj:
output_list += [key + '=' + obj[key]]
return ','.join(output_list)
def assert_qmp_absent(self, d, path): def assert_qmp_absent(self, d, path):
try: try:
result = self.dictpath(d, path) result = self.dictpath(d, path)
@ -444,8 +445,8 @@ class QMPTestCase(unittest.TestCase):
'''Asserts that the given filename is a json: filename and that its '''Asserts that the given filename is a json: filename and that its
content is equal to the given reference object''' content is equal to the given reference object'''
self.assertEqual(json_filename[:5], 'json:') self.assertEqual(json_filename[:5], 'json:')
self.assertEqual(self.flatten_qmp_object(json.loads(json_filename[5:])), self.assertEqual(self.vm.flatten_qmp_object(json.loads(json_filename[5:])),
self.flatten_qmp_object(reference)) self.vm.flatten_qmp_object(reference))
def cancel_and_wait(self, drive='drive0', force=False, resume=False): def cancel_and_wait(self, drive='drive0', force=False, resume=False):
'''Cancel a block job and wait for it to finish, returning the event''' '''Cancel a block job and wait for it to finish, returning the event'''
@ -464,6 +465,9 @@ class QMPTestCase(unittest.TestCase):
self.assert_qmp(event, 'data/device', drive) self.assert_qmp(event, 'data/device', drive)
result = event result = event
cancelled = True cancelled = True
elif event['event'] == 'JOB_STATUS_CHANGE':
self.assert_qmp(event, 'data/id', drive)
self.assert_no_active_block_jobs() self.assert_no_active_block_jobs()
return result return result
@ -479,6 +483,8 @@ class QMPTestCase(unittest.TestCase):
self.assert_qmp(event, 'data/offset', event['data']['len']) self.assert_qmp(event, 'data/offset', event['data']['len'])
self.assert_no_active_block_jobs() self.assert_no_active_block_jobs()
return event return event
elif event['event'] == 'JOB_STATUS_CHANGE':
self.assert_qmp(event, 'data/id', drive)
def wait_ready(self, drive='drive0'): def wait_ready(self, drive='drive0'):
'''Wait until a block job BLOCK_JOB_READY event''' '''Wait until a block job BLOCK_JOB_READY event'''
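The flatten_qmp_object()/qmp_to_opts() helpers moved onto the VM class above turn a nested QMP object into a flat, dotted key=value string. A hedged illustration with made-up option values (the order of the comma-separated entries follows dict iteration order):

# Illustration only; see the helpers added to VM above.
import iotests

vm = iotests.VM()
opts = vm.qmp_to_opts({
    'driver': 'qcow2',
    'file': {'driver': 'file', 'filename': 'test.qcow2'},
})
# e.g. 'driver=qcow2,file.driver=file,file.filename=test.qcow2'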


@ -496,33 +496,38 @@ typedef struct TestBlockJob {
bool should_complete; bool should_complete;
} TestBlockJob; } TestBlockJob;
static void test_job_completed(BlockJob *job, void *opaque) static void test_job_completed(Job *job, void *opaque)
{ {
block_job_completed(job, 0); job_completed(job, 0);
} }
static void coroutine_fn test_job_start(void *opaque) static void coroutine_fn test_job_start(void *opaque)
{ {
TestBlockJob *s = opaque; TestBlockJob *s = opaque;
block_job_event_ready(&s->common); job_transition_to_ready(&s->common.job);
while (!s->should_complete) { while (!s->should_complete) {
block_job_sleep_ns(&s->common, 100000); job_sleep_ns(&s->common.job, 100000);
} }
block_job_defer_to_main_loop(&s->common, test_job_completed, NULL); job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
} }
static void test_job_complete(BlockJob *job, Error **errp) static void test_job_complete(Job *job, Error **errp)
{ {
TestBlockJob *s = container_of(job, TestBlockJob, common); TestBlockJob *s = container_of(job, TestBlockJob, common.job);
s->should_complete = true; s->should_complete = true;
} }
BlockJobDriver test_job_driver = { BlockJobDriver test_job_driver = {
.instance_size = sizeof(TestBlockJob), .job_driver = {
.start = test_job_start, .instance_size = sizeof(TestBlockJob),
.complete = test_job_complete, .free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.start = test_job_start,
.complete = test_job_complete,
},
}; };
static void test_blockjob_common(enum drain_type drain_type) static void test_blockjob_common(enum drain_type drain_type)
@ -545,49 +550,49 @@ static void test_blockjob_common(enum drain_type drain_type)
job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL, job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
0, 0, NULL, NULL, &error_abort); 0, 0, NULL, NULL, &error_abort);
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
block_job_start(job); job_start(&job->job);
g_assert_cmpint(job->pause_count, ==, 0); g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->paused); g_assert_false(job->job.paused);
g_assert_false(job->busy); /* We're in block_job_sleep_ns() */ g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
do_drain_begin(drain_type, src); do_drain_begin(drain_type, src);
if (drain_type == BDRV_DRAIN_ALL) { if (drain_type == BDRV_DRAIN_ALL) {
/* bdrv_drain_all() drains both src and target */ /* bdrv_drain_all() drains both src and target */
g_assert_cmpint(job->pause_count, ==, 2); g_assert_cmpint(job->job.pause_count, ==, 2);
} else { } else {
g_assert_cmpint(job->pause_count, ==, 1); g_assert_cmpint(job->job.pause_count, ==, 1);
} }
/* XXX We don't wait until the job is actually paused. Is this okay? */ /* XXX We don't wait until the job is actually paused. Is this okay? */
/* g_assert_true(job->paused); */ /* g_assert_true(job->job.paused); */
g_assert_false(job->busy); /* The job is paused */ g_assert_false(job->job.busy); /* The job is paused */
do_drain_end(drain_type, src); do_drain_end(drain_type, src);
g_assert_cmpint(job->pause_count, ==, 0); g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->paused); g_assert_false(job->job.paused);
g_assert_false(job->busy); /* We're in block_job_sleep_ns() */ g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
do_drain_begin(drain_type, target); do_drain_begin(drain_type, target);
if (drain_type == BDRV_DRAIN_ALL) { if (drain_type == BDRV_DRAIN_ALL) {
/* bdrv_drain_all() drains both src and target */ /* bdrv_drain_all() drains both src and target */
g_assert_cmpint(job->pause_count, ==, 2); g_assert_cmpint(job->job.pause_count, ==, 2);
} else { } else {
g_assert_cmpint(job->pause_count, ==, 1); g_assert_cmpint(job->job.pause_count, ==, 1);
} }
/* XXX We don't wait until the job is actually paused. Is this okay? */ /* XXX We don't wait until the job is actually paused. Is this okay? */
/* g_assert_true(job->paused); */ /* g_assert_true(job->job.paused); */
g_assert_false(job->busy); /* The job is paused */ g_assert_false(job->job.busy); /* The job is paused */
do_drain_end(drain_type, target); do_drain_end(drain_type, target);
g_assert_cmpint(job->pause_count, ==, 0); g_assert_cmpint(job->job.pause_count, ==, 0);
g_assert_false(job->paused); g_assert_false(job->job.paused);
g_assert_false(job->busy); /* We're in block_job_sleep_ns() */ g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
ret = block_job_complete_sync(job, &error_abort); ret = job_complete_sync(&job->job, &error_abort);
g_assert_cmpint(ret, ==, 0); g_assert_cmpint(ret, ==, 0);
blk_unref(blk_src); blk_unref(blk_src);


@ -24,16 +24,17 @@ typedef struct {
int *result; int *result;
} TestBlockJob; } TestBlockJob;
static void test_block_job_complete(BlockJob *job, void *opaque) static void test_block_job_complete(Job *job, void *opaque)
{ {
BlockDriverState *bs = blk_bs(job->blk); BlockJob *bjob = container_of(job, BlockJob, job);
BlockDriverState *bs = blk_bs(bjob->blk);
int rc = (intptr_t)opaque; int rc = (intptr_t)opaque;
if (block_job_is_cancelled(job)) { if (job_is_cancelled(job)) {
rc = -ECANCELED; rc = -ECANCELED;
} }
block_job_completed(job, rc); job_completed(job, rc);
bdrv_unref(bs); bdrv_unref(bs);
} }
@ -44,18 +45,18 @@ static void coroutine_fn test_block_job_run(void *opaque)
while (s->iterations--) { while (s->iterations--) {
if (s->use_timer) { if (s->use_timer) {
block_job_sleep_ns(job, 0); job_sleep_ns(&job->job, 0);
} else { } else {
block_job_yield(job); job_yield(&job->job);
} }
if (block_job_is_cancelled(job)) { if (job_is_cancelled(&job->job)) {
break; break;
} }
} }
block_job_defer_to_main_loop(job, test_block_job_complete, job_defer_to_main_loop(&job->job, test_block_job_complete,
(void *)(intptr_t)s->rc); (void *)(intptr_t)s->rc);
} }
typedef struct { typedef struct {
@ -66,7 +67,7 @@ typedef struct {
static void test_block_job_cb(void *opaque, int ret) static void test_block_job_cb(void *opaque, int ret)
{ {
TestBlockJobCBData *data = opaque; TestBlockJobCBData *data = opaque;
if (!ret && block_job_is_cancelled(&data->job->common)) { if (!ret && job_is_cancelled(&data->job->common.job)) {
ret = -ECANCELED; ret = -ECANCELED;
} }
*data->result = ret; *data->result = ret;
@ -74,8 +75,13 @@ static void test_block_job_cb(void *opaque, int ret)
} }
static const BlockJobDriver test_block_job_driver = { static const BlockJobDriver test_block_job_driver = {
.instance_size = sizeof(TestBlockJob), .job_driver = {
.start = test_block_job_run, .instance_size = sizeof(TestBlockJob),
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.start = test_block_job_run,
},
}; };
/* Create a block job that completes with a given return code after a given /* Create a block job that completes with a given return code after a given
@ -87,7 +93,7 @@ static const BlockJobDriver test_block_job_driver = {
*/ */
static BlockJob *test_block_job_start(unsigned int iterations, static BlockJob *test_block_job_start(unsigned int iterations,
bool use_timer, bool use_timer,
int rc, int *result, BlockJobTxn *txn) int rc, int *result, JobTxn *txn)
{ {
BlockDriverState *bs; BlockDriverState *bs;
TestBlockJob *s; TestBlockJob *s;
@ -102,7 +108,7 @@ static BlockJob *test_block_job_start(unsigned int iterations,
snprintf(job_id, sizeof(job_id), "job%u", counter++); snprintf(job_id, sizeof(job_id), "job%u", counter++);
s = block_job_create(job_id, &test_block_job_driver, txn, bs, s = block_job_create(job_id, &test_block_job_driver, txn, bs,
0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, 0, BLK_PERM_ALL, 0, JOB_DEFAULT,
test_block_job_cb, data, &error_abort); test_block_job_cb, data, &error_abort);
s->iterations = iterations; s->iterations = iterations;
s->use_timer = use_timer; s->use_timer = use_timer;
@ -116,15 +122,15 @@ static BlockJob *test_block_job_start(unsigned int iterations,
static void test_single_job(int expected) static void test_single_job(int expected)
{ {
BlockJob *job; BlockJob *job;
BlockJobTxn *txn; JobTxn *txn;
int result = -EINPROGRESS; int result = -EINPROGRESS;
txn = block_job_txn_new(); txn = job_txn_new();
job = test_block_job_start(1, true, expected, &result, txn); job = test_block_job_start(1, true, expected, &result, txn);
block_job_start(job); job_start(&job->job);
if (expected == -ECANCELED) { if (expected == -ECANCELED) {
block_job_cancel(job, false); job_cancel(&job->job, false);
} }
while (result == -EINPROGRESS) { while (result == -EINPROGRESS) {
@ -132,7 +138,7 @@ static void test_single_job(int expected)
} }
g_assert_cmpint(result, ==, expected); g_assert_cmpint(result, ==, expected);
block_job_txn_unref(txn); job_txn_unref(txn);
} }
static void test_single_job_success(void) static void test_single_job_success(void)
@ -154,26 +160,26 @@ static void test_pair_jobs(int expected1, int expected2)
{ {
BlockJob *job1; BlockJob *job1;
BlockJob *job2; BlockJob *job2;
BlockJobTxn *txn; JobTxn *txn;
int result1 = -EINPROGRESS; int result1 = -EINPROGRESS;
int result2 = -EINPROGRESS; int result2 = -EINPROGRESS;
txn = block_job_txn_new(); txn = job_txn_new();
job1 = test_block_job_start(1, true, expected1, &result1, txn); job1 = test_block_job_start(1, true, expected1, &result1, txn);
job2 = test_block_job_start(2, true, expected2, &result2, txn); job2 = test_block_job_start(2, true, expected2, &result2, txn);
block_job_start(job1); job_start(&job1->job);
block_job_start(job2); job_start(&job2->job);
/* Release our reference now to trigger as many nice /* Release our reference now to trigger as many nice
* use-after-free bugs as possible. * use-after-free bugs as possible.
*/ */
block_job_txn_unref(txn); job_txn_unref(txn);
if (expected1 == -ECANCELED) { if (expected1 == -ECANCELED) {
block_job_cancel(job1, false); job_cancel(&job1->job, false);
} }
if (expected2 == -ECANCELED) { if (expected2 == -ECANCELED) {
block_job_cancel(job2, false); job_cancel(&job2->job, false);
} }
while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
@ -216,23 +222,23 @@ static void test_pair_jobs_fail_cancel_race(void)
{ {
BlockJob *job1; BlockJob *job1;
BlockJob *job2; BlockJob *job2;
BlockJobTxn *txn; JobTxn *txn;
int result1 = -EINPROGRESS; int result1 = -EINPROGRESS;
int result2 = -EINPROGRESS; int result2 = -EINPROGRESS;
txn = block_job_txn_new(); txn = job_txn_new();
job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn); job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn);
job2 = test_block_job_start(2, false, 0, &result2, txn); job2 = test_block_job_start(2, false, 0, &result2, txn);
block_job_start(job1); job_start(&job1->job);
block_job_start(job2); job_start(&job2->job);
block_job_cancel(job1, false); job_cancel(&job1->job, false);
/* Now make job2 finish before the main loop kicks jobs. This simulates /* Now make job2 finish before the main loop kicks jobs. This simulates
* the race between a pending kick and another job completing. * the race between a pending kick and another job completing.
*/ */
block_job_enter(job2); job_enter(&job2->job);
block_job_enter(job2); job_enter(&job2->job);
while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
aio_poll(qemu_get_aio_context(), true); aio_poll(qemu_get_aio_context(), true);
@ -241,7 +247,7 @@ static void test_pair_jobs_fail_cancel_race(void)
g_assert_cmpint(result1, ==, -ECANCELED); g_assert_cmpint(result1, ==, -ECANCELED);
g_assert_cmpint(result2, ==, -ECANCELED); g_assert_cmpint(result2, ==, -ECANCELED);
block_job_txn_unref(txn); job_txn_unref(txn);
} }
int main(int argc, char **argv) int main(int argc, char **argv)


@ -17,7 +17,12 @@
#include "sysemu/block-backend.h" #include "sysemu/block-backend.h"
static const BlockJobDriver test_block_job_driver = { static const BlockJobDriver test_block_job_driver = {
.instance_size = sizeof(BlockJob), .job_driver = {
.instance_size = sizeof(BlockJob),
.free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
},
}; };
static void block_job_cb(void *opaque, int ret) static void block_job_cb(void *opaque, int ret)
@ -38,9 +43,9 @@ static BlockJob *mk_job(BlockBackend *blk, const char *id,
g_assert_null(errp); g_assert_null(errp);
g_assert_nonnull(job); g_assert_nonnull(job);
if (id) { if (id) {
g_assert_cmpstr(job->id, ==, id); g_assert_cmpstr(job->job.id, ==, id);
} else { } else {
g_assert_cmpstr(job->id, ==, blk_name(blk)); g_assert_cmpstr(job->job.id, ==, blk_name(blk));
} }
} else { } else {
g_assert_nonnull(errp); g_assert_nonnull(errp);
@ -55,7 +60,7 @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id,
bool should_succeed) bool should_succeed)
{ {
return mk_job(blk, id, &test_block_job_driver, return mk_job(blk, id, &test_block_job_driver,
should_succeed, BLOCK_JOB_DEFAULT); should_succeed, JOB_DEFAULT);
} }
/* This creates a BlockBackend (optionally with a name) with a /* This creates a BlockBackend (optionally with a name) with a
@ -124,11 +129,11 @@ static void test_job_ids(void)
job[1] = do_test_id(blk[1], "id0", false); job[1] = do_test_id(blk[1], "id0", false);
/* But once job[0] finishes we can reuse its ID */ /* But once job[0] finishes we can reuse its ID */
block_job_early_fail(job[0]); job_early_fail(&job[0]->job);
job[1] = do_test_id(blk[1], "id0", true); job[1] = do_test_id(blk[1], "id0", true);
/* No job ID specified, defaults to the backend name ('drive1') */ /* No job ID specified, defaults to the backend name ('drive1') */
block_job_early_fail(job[1]); job_early_fail(&job[1]->job);
job[1] = do_test_id(blk[1], NULL, true); job[1] = do_test_id(blk[1], NULL, true);
/* Duplicate job ID */ /* Duplicate job ID */
@ -141,9 +146,9 @@ static void test_job_ids(void)
/* This one is valid */ /* This one is valid */
job[2] = do_test_id(blk[2], "id_2", true); job[2] = do_test_id(blk[2], "id_2", true);
block_job_early_fail(job[0]); job_early_fail(&job[0]->job);
block_job_early_fail(job[1]); job_early_fail(&job[1]->job);
block_job_early_fail(job[2]); job_early_fail(&job[2]->job);
destroy_blk(blk[0]); destroy_blk(blk[0]);
destroy_blk(blk[1]); destroy_blk(blk[1]);
@ -158,16 +163,16 @@ typedef struct CancelJob {
bool completed; bool completed;
} CancelJob; } CancelJob;
static void cancel_job_completed(BlockJob *job, void *opaque) static void cancel_job_completed(Job *job, void *opaque)
{ {
CancelJob *s = opaque; CancelJob *s = opaque;
s->completed = true; s->completed = true;
block_job_completed(job, 0); job_completed(job, 0);
} }
static void cancel_job_complete(BlockJob *job, Error **errp) static void cancel_job_complete(Job *job, Error **errp)
{ {
CancelJob *s = container_of(job, CancelJob, common); CancelJob *s = container_of(job, CancelJob, common.job);
s->should_complete = true; s->should_complete = true;
} }
@ -176,25 +181,30 @@ static void coroutine_fn cancel_job_start(void *opaque)
CancelJob *s = opaque; CancelJob *s = opaque;
while (!s->should_complete) { while (!s->should_complete) {
if (block_job_is_cancelled(&s->common)) { if (job_is_cancelled(&s->common.job)) {
goto defer; goto defer;
} }
if (!s->common.ready && s->should_converge) { if (!job_is_ready(&s->common.job) && s->should_converge) {
block_job_event_ready(&s->common); job_transition_to_ready(&s->common.job);
} }
block_job_sleep_ns(&s->common, 100000); job_sleep_ns(&s->common.job, 100000);
} }
defer: defer:
block_job_defer_to_main_loop(&s->common, cancel_job_completed, s); job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
} }
static const BlockJobDriver test_cancel_driver = { static const BlockJobDriver test_cancel_driver = {
.instance_size = sizeof(CancelJob), .job_driver = {
.start = cancel_job_start, .instance_size = sizeof(CancelJob),
.complete = cancel_job_complete, .free = block_job_free,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
.start = cancel_job_start,
.complete = cancel_job_complete,
},
}; };
static CancelJob *create_common(BlockJob **pjob) static CancelJob *create_common(BlockJob **pjob)
@ -205,9 +215,9 @@ static CancelJob *create_common(BlockJob **pjob)
blk = create_blk(NULL); blk = create_blk(NULL);
job = mk_job(blk, "Steve", &test_cancel_driver, true, job = mk_job(blk, "Steve", &test_cancel_driver, true,
BLOCK_JOB_MANUAL_FINALIZE | BLOCK_JOB_MANUAL_DISMISS); JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
block_job_ref(job); job_ref(&job->job);
assert(job->status == BLOCK_JOB_STATUS_CREATED); assert(job->job.status == JOB_STATUS_CREATED);
s = container_of(job, CancelJob, common); s = container_of(job, CancelJob, common);
s->blk = blk; s->blk = blk;
@ -219,16 +229,15 @@ static void cancel_common(CancelJob *s)
{ {
BlockJob *job = &s->common; BlockJob *job = &s->common;
BlockBackend *blk = s->blk; BlockBackend *blk = s->blk;
BlockJobStatus sts = job->status; JobStatus sts = job->job.status;
block_job_cancel_sync(job); job_cancel_sync(&job->job);
if ((sts != BLOCK_JOB_STATUS_CREATED) && if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
(sts != BLOCK_JOB_STATUS_CONCLUDED)) { Job *dummy = &job->job;
BlockJob *dummy = job; job_dismiss(&dummy, &error_abort);
block_job_dismiss(&dummy, &error_abort);
} }
assert(job->status == BLOCK_JOB_STATUS_NULL); assert(job->job.status == JOB_STATUS_NULL);
block_job_unref(job); job_unref(&job->job);
destroy_blk(blk); destroy_blk(blk);
} }
@ -248,8 +257,8 @@ static void test_cancel_running(void)
s = create_common(&job); s = create_common(&job);
block_job_start(job); job_start(&job->job);
assert(job->status == BLOCK_JOB_STATUS_RUNNING); assert(job->job.status == JOB_STATUS_RUNNING);
cancel_common(s); cancel_common(s);
} }
@ -261,12 +270,12 @@ static void test_cancel_paused(void)
s = create_common(&job); s = create_common(&job);
block_job_start(job); job_start(&job->job);
assert(job->status == BLOCK_JOB_STATUS_RUNNING); assert(job->job.status == JOB_STATUS_RUNNING);
block_job_user_pause(job, &error_abort); job_user_pause(&job->job, &error_abort);
block_job_enter(job); job_enter(&job->job);
assert(job->status == BLOCK_JOB_STATUS_PAUSED); assert(job->job.status == JOB_STATUS_PAUSED);
cancel_common(s); cancel_common(s);
} }
@ -278,12 +287,12 @@ static void test_cancel_ready(void)
s = create_common(&job); s = create_common(&job);
block_job_start(job); job_start(&job->job);
assert(job->status == BLOCK_JOB_STATUS_RUNNING); assert(job->job.status == JOB_STATUS_RUNNING);
s->should_converge = true; s->should_converge = true;
block_job_enter(job); job_enter(&job->job);
assert(job->status == BLOCK_JOB_STATUS_READY); assert(job->job.status == JOB_STATUS_READY);
cancel_common(s); cancel_common(s);
} }
@ -295,16 +304,16 @@ static void test_cancel_standby(void)
s = create_common(&job); s = create_common(&job);
block_job_start(job); job_start(&job->job);
assert(job->status == BLOCK_JOB_STATUS_RUNNING); assert(job->job.status == JOB_STATUS_RUNNING);
s->should_converge = true; s->should_converge = true;
block_job_enter(job); job_enter(&job->job);
assert(job->status == BLOCK_JOB_STATUS_READY); assert(job->job.status == JOB_STATUS_READY);
block_job_user_pause(job, &error_abort); job_user_pause(&job->job, &error_abort);
block_job_enter(job); job_enter(&job->job);
assert(job->status == BLOCK_JOB_STATUS_STANDBY); assert(job->job.status == JOB_STATUS_STANDBY);
cancel_common(s); cancel_common(s);
} }
@ -316,19 +325,19 @@ static void test_cancel_pending(void)
s = create_common(&job); s = create_common(&job);
block_job_start(job); job_start(&job->job);
assert(job->status == BLOCK_JOB_STATUS_RUNNING); assert(job->job.status == JOB_STATUS_RUNNING);
s->should_converge = true; s->should_converge = true;
block_job_enter(job); job_enter(&job->job);
assert(job->status == BLOCK_JOB_STATUS_READY); assert(job->job.status == JOB_STATUS_READY);
block_job_complete(job, &error_abort); job_complete(&job->job, &error_abort);
block_job_enter(job); job_enter(&job->job);
while (!s->completed) { while (!s->completed) {
aio_poll(qemu_get_aio_context(), true); aio_poll(qemu_get_aio_context(), true);
} }
assert(job->status == BLOCK_JOB_STATUS_PENDING); assert(job->job.status == JOB_STATUS_PENDING);
cancel_common(s); cancel_common(s);
} }
@ -340,22 +349,22 @@ static void test_cancel_concluded(void)
s = create_common(&job); s = create_common(&job);
block_job_start(job); job_start(&job->job);
assert(job->status == BLOCK_JOB_STATUS_RUNNING); assert(job->job.status == JOB_STATUS_RUNNING);
s->should_converge = true; s->should_converge = true;
block_job_enter(job); job_enter(&job->job);
assert(job->status == BLOCK_JOB_STATUS_READY); assert(job->job.status == JOB_STATUS_READY);
block_job_complete(job, &error_abort); job_complete(&job->job, &error_abort);
block_job_enter(job); job_enter(&job->job);
while (!s->completed) { while (!s->completed) {
aio_poll(qemu_get_aio_context(), true); aio_poll(qemu_get_aio_context(), true);
} }
assert(job->status == BLOCK_JOB_STATUS_PENDING); assert(job->job.status == JOB_STATUS_PENDING);
block_job_finalize(job, &error_abort); job_finalize(&job->job, &error_abort);
assert(job->status == BLOCK_JOB_STATUS_CONCLUDED); assert(job->job.status == JOB_STATUS_CONCLUDED);
cancel_common(s); cancel_common(s);
} }


@ -104,6 +104,20 @@ gdbstub_err_invalid_rle(void) "got invalid RLE sequence"
gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x" gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x"
gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x" gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x"
# job.c
job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
# job-qmp.c
qmp_job_cancel(void *job) "job %p"
qmp_job_pause(void *job) "job %p"
qmp_job_resume(void *job) "job %p"
qmp_job_complete(void *job) "job %p"
qmp_job_finalize(void *job) "job %p"
qmp_job_dismiss(void *job) "job %p"
### Guest events, keep at bottom ### Guest events, keep at bottom

vl.c

@ -4683,6 +4683,7 @@ int main(int argc, char **argv, char **envp)
/* No more vcpu or device emulation activity beyond this point */ /* No more vcpu or device emulation activity beyond this point */
vm_shutdown(); vm_shutdown();
job_cancel_sync_all();
bdrv_close_all(); bdrv_close_all();
res_free(); res_free();