Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2021-01-26' into staging

Block patches:
- Make backup block jobs use asynchronous requests with the block-copy
  module
- Use COR filter node for stream block jobs
- Make coroutine-sigaltstack’s qemu_coroutine_new() function thread-safe
- Report error string when file locking fails with an unexpected errno
- iotest fixes, additions, and some refactoring

# gpg: Signature made Tue 26 Jan 2021 14:16:59 GMT
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2021-01-26: (53 commits)
  iotests/178: Pass value to invalid option
  iotests/118: Drop 'change' test
  iotests: Add test for the regression fixed in c8bf9a9169
  block: report errno when flock fcntl fails
  simplebench: add bench-backup.py
  simplebench: bench_block_job: add cmd_options argument
  simplebench/bench_block_job: use correct shebang line with python3
  block/block-copy: drop unused argument of block_copy()
  block/block-copy: drop unused block_copy_set_progress_callback()
  qapi: backup: disable copy_range by default
  backup: move to block-copy
  block/backup: drop extra gotos from backup_run()
  block/block-copy: make progress_bytes_callback optional
  iotests: 257: prepare for backup over block-copy
  iotests: 219: prepare for backup over block-copy
  iotests: 185: prepare for backup over block-copy
  iotests/129: Limit backup's max-chunk/max-workers
  iotests: 56: prepare for backup over block-copy
  qapi: backup: add max-chunk and max-workers to x-perf struct
  job: call job_enter from job_pause
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2021-01-26 21:08:13 +00:00
commit 565c86af51
51 changed files with 1796 additions and 546 deletions

block.c

@ -4660,6 +4660,31 @@ static void bdrv_delete(BlockDriverState *bs)
g_free(bs); g_free(bs);
} }
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options,
int flags, Error **errp)
{
BlockDriverState *new_node_bs;
Error *local_err = NULL;
new_node_bs = bdrv_open(NULL, NULL, node_options, flags, errp);
if (new_node_bs == NULL) {
error_prepend(errp, "Could not create node: ");
return NULL;
}
bdrv_drained_begin(bs);
bdrv_replace_node(bs, new_node_bs, &local_err);
bdrv_drained_end(bs);
if (local_err) {
bdrv_unref(new_node_bs);
error_propagate(errp, local_err);
return NULL;
}
return new_node_bs;
}
/* /*
* Run consistency checks on an image * Run consistency checks on an image
* *

block/backup-top.c

@ -61,7 +61,7 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
off = QEMU_ALIGN_DOWN(offset, s->cluster_size); off = QEMU_ALIGN_DOWN(offset, s->cluster_size);
end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size); end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size);
return block_copy(s->bcs, off, end - off, NULL); return block_copy(s->bcs, off, end - off, true);
} }
static int coroutine_fn backup_top_co_pdiscard(BlockDriverState *bs, static int coroutine_fn backup_top_co_pdiscard(BlockDriverState *bs,
@ -186,6 +186,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
BlockDriverState *target, BlockDriverState *target,
const char *filter_node_name, const char *filter_node_name,
uint64_t cluster_size, uint64_t cluster_size,
BackupPerf *perf,
BdrvRequestFlags write_flags, BdrvRequestFlags write_flags,
BlockCopyState **bcs, BlockCopyState **bcs,
Error **errp) Error **errp)
@ -244,7 +245,8 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
state->cluster_size = cluster_size; state->cluster_size = cluster_size;
state->bcs = block_copy_state_new(top->backing, state->target, state->bcs = block_copy_state_new(top->backing, state->target,
cluster_size, write_flags, &local_err); cluster_size, perf->use_copy_range,
write_flags, &local_err);
if (local_err) { if (local_err) {
error_prepend(&local_err, "Cannot create block-copy-state: "); error_prepend(&local_err, "Cannot create block-copy-state: ");
goto fail; goto fail;

block/backup-top.h

@ -33,6 +33,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
BlockDriverState *target, BlockDriverState *target,
const char *filter_node_name, const char *filter_node_name,
uint64_t cluster_size, uint64_t cluster_size,
BackupPerf *perf,
BdrvRequestFlags write_flags, BdrvRequestFlags write_flags,
BlockCopyState **bcs, BlockCopyState **bcs,
Error **errp); Error **errp);

block/backup.c

@ -22,7 +22,6 @@
#include "block/block-copy.h" #include "block/block-copy.h"
#include "qapi/error.h" #include "qapi/error.h"
#include "qapi/qmp/qerror.h" #include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h" #include "qemu/cutils.h"
#include "sysemu/block-backend.h" #include "sysemu/block-backend.h"
#include "qemu/bitmap.h" #include "qemu/bitmap.h"
@ -44,40 +43,17 @@ typedef struct BackupBlockJob {
BlockdevOnError on_source_error; BlockdevOnError on_source_error;
BlockdevOnError on_target_error; BlockdevOnError on_target_error;
uint64_t len; uint64_t len;
uint64_t bytes_read;
int64_t cluster_size; int64_t cluster_size;
BackupPerf perf;
BlockCopyState *bcs; BlockCopyState *bcs;
bool wait;
BlockCopyCallState *bg_bcs_call;
} BackupBlockJob; } BackupBlockJob;
static const BlockJobDriver backup_job_driver; static const BlockJobDriver backup_job_driver;
static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
{
BackupBlockJob *s = opaque;
s->bytes_read += bytes;
}
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int64_t offset, uint64_t bytes,
bool *error_is_read)
{
int ret = 0;
int64_t start, end; /* bytes */
start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
trace_backup_do_cow_enter(job, start, offset, bytes);
ret = block_copy(job->bcs, start, end - start, error_is_read);
trace_backup_do_cow_return(job, offset, bytes, ret);
return ret;
}
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret) static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{ {
BdrvDirtyBitmap *bm; BdrvDirtyBitmap *bm;
@ -157,53 +133,96 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
} }
} }
static bool coroutine_fn yield_and_check(BackupBlockJob *job) static void coroutine_fn backup_block_copy_callback(void *opaque)
{ {
uint64_t delay_ns; BackupBlockJob *s = opaque;
if (job_is_cancelled(&job->common.job)) { if (s->wait) {
return true; s->wait = false;
aio_co_wake(s->common.job.co);
} else {
job_enter(&s->common.job);
} }
/*
* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
* return. Without a yield, the VM would not reboot.
*/
delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
job->bytes_read = 0;
job_sleep_ns(&job->common.job, delay_ns);
if (job_is_cancelled(&job->common.job)) {
return true;
}
return false;
} }
static int coroutine_fn backup_loop(BackupBlockJob *job) static int coroutine_fn backup_loop(BackupBlockJob *job)
{ {
bool error_is_read; BlockCopyCallState *s = NULL;
int64_t offset;
BdrvDirtyBitmapIter *bdbi;
int ret = 0; int ret = 0;
bool error_is_read;
BlockErrorAction act;
bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs)); while (true) { /* retry loop */
while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) { job->bg_bcs_call = s = block_copy_async(job->bcs, 0,
do { QEMU_ALIGN_UP(job->len, job->cluster_size),
if (yield_and_check(job)) { job->perf.max_workers, job->perf.max_chunk,
goto out; backup_block_copy_callback, job);
}
ret = backup_do_cow(job, offset, job->cluster_size, &error_is_read); while (!block_copy_call_finished(s) &&
if (ret < 0 && backup_error_action(job, error_is_read, -ret) == !job_is_cancelled(&job->common.job))
BLOCK_ERROR_ACTION_REPORT) {
{ job_yield(&job->common.job);
goto out; }
}
} while (ret < 0); if (!block_copy_call_finished(s)) {
assert(job_is_cancelled(&job->common.job));
/*
* Note that we can't use job_yield() here, as it doesn't work for
* cancelled job.
*/
block_copy_call_cancel(s);
job->wait = true;
qemu_coroutine_yield();
assert(block_copy_call_finished(s));
ret = 0;
goto out;
}
if (job_is_cancelled(&job->common.job) ||
block_copy_call_succeeded(s))
{
ret = 0;
goto out;
}
if (block_copy_call_cancelled(s)) {
/*
* Job is not cancelled but only block-copy call. This is possible
* after job pause. Now the pause is finished, start new block-copy
* iteration.
*/
block_copy_call_free(s);
continue;
}
/* The only remaining case is failed block-copy call. */
assert(block_copy_call_failed(s));
ret = block_copy_call_status(s, &error_is_read);
act = backup_error_action(job, error_is_read, -ret);
switch (act) {
case BLOCK_ERROR_ACTION_REPORT:
goto out;
case BLOCK_ERROR_ACTION_STOP:
/*
* Go to pause prior to starting new block-copy call on the next
* iteration.
*/
job_pause_point(&job->common.job);
break;
case BLOCK_ERROR_ACTION_IGNORE:
/* Proceed to new block-copy call to retry. */
break;
default:
abort();
}
block_copy_call_free(s);
} }
out: out:
bdrv_dirty_iter_free(bdbi); block_copy_call_free(s);
job->bg_bcs_call = NULL;
return ret; return ret;
} }
@ -235,7 +254,7 @@ static void backup_init_bcs_bitmap(BackupBlockJob *job)
static int coroutine_fn backup_run(Job *job, Error **errp) static int coroutine_fn backup_run(Job *job, Error **errp)
{ {
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
int ret = 0; int ret;
backup_init_bcs_bitmap(s); backup_init_bcs_bitmap(s);
@ -244,14 +263,19 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
int64_t count; int64_t count;
for (offset = 0; offset < s->len; ) { for (offset = 0; offset < s->len; ) {
if (yield_and_check(s)) { if (job_is_cancelled(job)) {
ret = -ECANCELED; return -ECANCELED;
goto out; }
job_pause_point(job);
if (job_is_cancelled(job)) {
return -ECANCELED;
} }
ret = block_copy_reset_unallocated(s->bcs, offset, &count); ret = block_copy_reset_unallocated(s->bcs, offset, &count);
if (ret < 0) { if (ret < 0) {
goto out; return ret;
} }
offset += count; offset += count;
@ -272,11 +296,37 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
job_yield(job); job_yield(job);
} }
} else { } else {
ret = backup_loop(s); return backup_loop(s);
} }
out: return 0;
return ret; }
static void coroutine_fn backup_pause(Job *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
if (s->bg_bcs_call && !block_copy_call_finished(s->bg_bcs_call)) {
block_copy_call_cancel(s->bg_bcs_call);
s->wait = true;
qemu_coroutine_yield();
}
}
static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
/*
* block_job_set_speed() is called first from block_job_create(), when we
* don't yet have s->bcs.
*/
if (s->bcs) {
block_copy_set_speed(s->bcs, speed);
if (s->bg_bcs_call) {
block_copy_kick(s->bg_bcs_call);
}
}
} }
static const BlockJobDriver backup_job_driver = { static const BlockJobDriver backup_job_driver = {
@ -289,7 +339,9 @@ static const BlockJobDriver backup_job_driver = {
.commit = backup_commit, .commit = backup_commit,
.abort = backup_abort, .abort = backup_abort,
.clean = backup_clean, .clean = backup_clean,
} .pause = backup_pause,
},
.set_speed = backup_set_speed,
}; };
static int64_t backup_calculate_cluster_size(BlockDriverState *target, static int64_t backup_calculate_cluster_size(BlockDriverState *target,
@ -335,6 +387,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BitmapSyncMode bitmap_mode, BitmapSyncMode bitmap_mode,
bool compress, bool compress,
const char *filter_node_name, const char *filter_node_name,
BackupPerf *perf,
BlockdevOnError on_source_error, BlockdevOnError on_source_error,
BlockdevOnError on_target_error, BlockdevOnError on_target_error,
int creation_flags, int creation_flags,
@ -386,6 +439,29 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
return NULL; return NULL;
} }
cluster_size = backup_calculate_cluster_size(target, errp);
if (cluster_size < 0) {
goto error;
}
if (perf->max_workers < 1) {
error_setg(errp, "max-workers must be greater than zero");
return NULL;
}
if (perf->max_chunk < 0) {
error_setg(errp, "max-chunk must be zero (which means no limit) or "
"positive");
return NULL;
}
if (perf->max_chunk && perf->max_chunk < cluster_size) {
error_setg(errp, "Required max-chunk (%" PRIi64 ") is less than backup "
"cluster size (%" PRIi64 ")", perf->max_chunk, cluster_size);
return NULL;
}
if (sync_bitmap) { if (sync_bitmap) {
/* If we need to write to this bitmap, check that we can: */ /* If we need to write to this bitmap, check that we can: */
if (bitmap_mode != BITMAP_SYNC_MODE_NEVER && if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
@ -418,11 +494,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
goto error; goto error;
} }
cluster_size = backup_calculate_cluster_size(target, errp);
if (cluster_size < 0) {
goto error;
}
/* /*
* If source is in backing chain of target assume that target is going to be * If source is in backing chain of target assume that target is going to be
* used for "image fleecing", i.e. it should represent a kind of snapshot of * used for "image fleecing", i.e. it should represent a kind of snapshot of
@ -441,7 +512,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
(compress ? BDRV_REQ_WRITE_COMPRESSED : 0), (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
backup_top = bdrv_backup_top_append(bs, target, filter_node_name, backup_top = bdrv_backup_top_append(bs, target, filter_node_name,
cluster_size, write_flags, &bcs, errp); cluster_size, perf,
write_flags, &bcs, errp);
if (!backup_top) { if (!backup_top) {
goto error; goto error;
} }
@ -464,9 +536,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
job->bcs = bcs; job->bcs = bcs;
job->cluster_size = cluster_size; job->cluster_size = cluster_size;
job->len = len; job->len = len;
job->perf = *perf;
block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
block_copy_set_progress_meter(bcs, &job->common.job.progress); block_copy_set_progress_meter(bcs, &job->common.job.progress);
block_copy_set_speed(bcs, speed);
/* Required permissions are already taken by backup-top target */ /* Required permissions are already taken by backup-top target */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,

block/block-copy.c

@ -26,11 +26,34 @@
#define BLOCK_COPY_MAX_BUFFER (1 * MiB) #define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB) #define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64 #define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
static coroutine_fn int block_copy_task_entry(AioTask *task); static coroutine_fn int block_copy_task_entry(AioTask *task);
typedef struct BlockCopyCallState { typedef struct BlockCopyCallState {
bool failed; /* IN parameters. Initialized in block_copy_async() and never changed. */
BlockCopyState *s;
int64_t offset;
int64_t bytes;
int max_workers;
int64_t max_chunk;
bool ignore_ratelimit;
BlockCopyAsyncCallbackFunc cb;
void *cb_opaque;
/* Coroutine where async block-copy is running */
Coroutine *co;
/* To reference all call states from BlockCopyState */
QLIST_ENTRY(BlockCopyCallState) list;
/* State */
int ret;
bool finished;
QemuCoSleepState *sleep_state;
bool cancelled;
/* OUT parameters */
bool error_is_read; bool error_is_read;
} BlockCopyCallState; } BlockCopyCallState;
@ -65,7 +88,8 @@ typedef struct BlockCopyState {
bool use_copy_range; bool use_copy_range;
int64_t copy_size; int64_t copy_size;
uint64_t len; uint64_t len;
QLIST_HEAD(, BlockCopyTask) tasks; QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
QLIST_HEAD(, BlockCopyCallState) calls;
BdrvRequestFlags write_flags; BdrvRequestFlags write_flags;
@ -86,11 +110,11 @@ typedef struct BlockCopyState {
bool skip_unallocated; bool skip_unallocated;
ProgressMeter *progress; ProgressMeter *progress;
/* progress_bytes_callback: called when some copying progress is done. */
ProgressBytesCallbackFunc progress_bytes_callback;
void *progress_opaque;
SharedResource *mem; SharedResource *mem;
uint64_t speed;
RateLimit rate_limit;
} BlockCopyState; } BlockCopyState;
static BlockCopyTask *find_conflicting_task(BlockCopyState *s, static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
@ -134,10 +158,11 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
int64_t offset, int64_t bytes) int64_t offset, int64_t bytes)
{ {
BlockCopyTask *task; BlockCopyTask *task;
int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);
if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap, if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
offset, offset + bytes, offset, offset + bytes,
s->copy_size, &offset, &bytes)) max_chunk, &offset, &bytes))
{ {
return NULL; return NULL;
} }
@ -218,7 +243,7 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
} }
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
int64_t cluster_size, int64_t cluster_size, bool use_copy_range,
BdrvRequestFlags write_flags, Error **errp) BdrvRequestFlags write_flags, Error **errp)
{ {
BlockCopyState *s; BlockCopyState *s;
@ -260,24 +285,16 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
* We enable copy-range, but keep small copy_size, until first * We enable copy-range, but keep small copy_size, until first
* successful copy_range (look at block_copy_do_copy). * successful copy_range (look at block_copy_do_copy).
*/ */
s->use_copy_range = true; s->use_copy_range = use_copy_range;
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER); s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
} }
QLIST_INIT(&s->tasks); QLIST_INIT(&s->tasks);
QLIST_INIT(&s->calls);
return s; return s;
} }
void block_copy_set_progress_callback(
BlockCopyState *s,
ProgressBytesCallbackFunc progress_bytes_callback,
void *progress_opaque)
{
s->progress_bytes_callback = progress_bytes_callback;
s->progress_opaque = progress_opaque;
}
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm) void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{ {
s->progress = pm; s->progress = pm;
@ -420,12 +437,11 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes, ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
&error_is_read); &error_is_read);
if (ret < 0 && !t->call_state->failed) { if (ret < 0 && !t->call_state->ret) {
t->call_state->failed = true; t->call_state->ret = ret;
t->call_state->error_is_read = error_is_read; t->call_state->error_is_read = error_is_read;
} else { } else {
progress_work_done(t->s->progress, t->bytes); progress_work_done(t->s->progress, t->bytes);
t->s->progress_bytes_callback(t->bytes, t->s->progress_opaque);
} }
co_put_to_shres(t->s->mem, t->bytes); co_put_to_shres(t->s->mem, t->bytes);
block_copy_task_end(t, ret); block_copy_task_end(t, ret);
@ -544,15 +560,17 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
* Returns 1 if dirty clusters found and successfully copied, 0 if no dirty * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
* clusters found and -errno on failure. * clusters found and -errno on failure.
*/ */
static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s, static int coroutine_fn
int64_t offset, int64_t bytes, block_copy_dirty_clusters(BlockCopyCallState *call_state)
bool *error_is_read)
{ {
BlockCopyState *s = call_state->s;
int64_t offset = call_state->offset;
int64_t bytes = call_state->bytes;
int ret = 0; int ret = 0;
bool found_dirty = false; bool found_dirty = false;
int64_t end = offset + bytes; int64_t end = offset + bytes;
AioTaskPool *aio = NULL; AioTaskPool *aio = NULL;
BlockCopyCallState call_state = {false, false};
/* /*
* block_copy() user is responsible for keeping source and target in same * block_copy() user is responsible for keeping source and target in same
@ -564,11 +582,11 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size)); assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
while (bytes && aio_task_pool_status(aio) == 0) { while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
BlockCopyTask *task; BlockCopyTask *task;
int64_t status_bytes; int64_t status_bytes;
task = block_copy_task_create(s, &call_state, offset, bytes); task = block_copy_task_create(s, call_state, offset, bytes);
if (!task) { if (!task) {
/* No more dirty bits in the bitmap */ /* No more dirty bits in the bitmap */
trace_block_copy_skip_range(s, offset, bytes); trace_block_copy_skip_range(s, offset, bytes);
@ -599,6 +617,21 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
} }
task->zeroes = ret & BDRV_BLOCK_ZERO; task->zeroes = ret & BDRV_BLOCK_ZERO;
if (s->speed) {
if (!call_state->ignore_ratelimit) {
uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
if (ns > 0) {
block_copy_task_end(task, -EAGAIN);
g_free(task);
qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, ns,
&call_state->sleep_state);
continue;
}
}
ratelimit_calculate_delay(&s->rate_limit, task->bytes);
}
trace_block_copy_process(s, task->offset); trace_block_copy_process(s, task->offset);
co_get_from_shres(s->mem, task->bytes); co_get_from_shres(s->mem, task->bytes);
@ -607,7 +640,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
bytes = end - offset; bytes = end - offset;
if (!aio && bytes) { if (!aio && bytes) {
aio = aio_task_pool_new(BLOCK_COPY_MAX_WORKERS); aio = aio_task_pool_new(call_state->max_workers);
} }
ret = block_copy_task_run(aio, task); ret = block_copy_task_run(aio, task);
@ -633,15 +666,19 @@ out:
aio_task_pool_free(aio); aio_task_pool_free(aio);
} }
if (error_is_read && ret < 0) {
*error_is_read = call_state.error_is_read;
}
return ret < 0 ? ret : found_dirty; return ret < 0 ? ret : found_dirty;
} }
void block_copy_kick(BlockCopyCallState *call_state)
{
if (call_state->sleep_state) {
qemu_co_sleep_wake(call_state->sleep_state);
}
}
/* /*
* block_copy * block_copy_common
* *
* Copy requested region, accordingly to dirty bitmap. * Copy requested region, accordingly to dirty bitmap.
* Collaborate with parallel block_copy requests: if they succeed it will help * Collaborate with parallel block_copy requests: if they succeed it will help
@ -649,16 +686,18 @@ out:
* it means that some I/O operation failed in context of _this_ block_copy call, * it means that some I/O operation failed in context of _this_ block_copy call,
* not some parallel operation. * not some parallel operation.
*/ */
int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
bool *error_is_read)
{ {
int ret; int ret;
do { QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);
ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);
if (ret == 0) { do {
ret = block_copy_wait_one(s, offset, bytes); ret = block_copy_dirty_clusters(call_state);
if (ret == 0 && !call_state->cancelled) {
ret = block_copy_wait_one(call_state->s, call_state->offset,
call_state->bytes);
} }
/* /*
@ -670,11 +709,110 @@ int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
* 2. We have waited for some intersecting block-copy request * 2. We have waited for some intersecting block-copy request
* It may have failed and produced new dirty bits. * It may have failed and produced new dirty bits.
*/ */
} while (ret > 0); } while (ret > 0 && !call_state->cancelled);
call_state->finished = true;
if (call_state->cb) {
call_state->cb(call_state->cb_opaque);
}
QLIST_REMOVE(call_state, list);
return ret; return ret;
} }
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
bool ignore_ratelimit)
{
BlockCopyCallState call_state = {
.s = s,
.offset = start,
.bytes = bytes,
.ignore_ratelimit = ignore_ratelimit,
.max_workers = BLOCK_COPY_MAX_WORKERS,
};
return block_copy_common(&call_state);
}
static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
block_copy_common(opaque);
}
BlockCopyCallState *block_copy_async(BlockCopyState *s,
int64_t offset, int64_t bytes,
int max_workers, int64_t max_chunk,
BlockCopyAsyncCallbackFunc cb,
void *cb_opaque)
{
BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);
*call_state = (BlockCopyCallState) {
.s = s,
.offset = offset,
.bytes = bytes,
.max_workers = max_workers,
.max_chunk = max_chunk,
.cb = cb,
.cb_opaque = cb_opaque,
.co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
};
qemu_coroutine_enter(call_state->co);
return call_state;
}
void block_copy_call_free(BlockCopyCallState *call_state)
{
if (!call_state) {
return;
}
assert(call_state->finished);
g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
return call_state->finished;
}
bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
return call_state->finished && !call_state->cancelled &&
call_state->ret == 0;
}
bool block_copy_call_failed(BlockCopyCallState *call_state)
{
return call_state->finished && !call_state->cancelled &&
call_state->ret < 0;
}
bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
return call_state->cancelled;
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
assert(call_state->finished);
if (error_is_read) {
*error_is_read = call_state->error_is_read;
}
return call_state->ret;
}
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
call_state->cancelled = true;
block_copy_kick(call_state);
}
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s) BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{ {
return s->copy_bitmap; return s->copy_bitmap;
@ -684,3 +822,18 @@ void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{ {
s->skip_unallocated = skip; s->skip_unallocated = skip;
} }
void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
s->speed = speed;
if (speed > 0) {
ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);
}
/*
* Note: it's good to kick all call states from here, but it should be done
* only from a coroutine, to not crash if s->calls list changed while
* entering one call. So for now, the only user of this function kicks its
* only one call_state by hand.
*/
}
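
Note: the hunks above introduce the asynchronous block-copy entry points (block_copy_async(), block_copy_call_finished(), block_copy_call_status(), block_copy_call_free()). As a reading aid, here is a minimal, hypothetical coroutine sketch of the calling convention, modelled on the backup_loop() rewrite in block/backup.c above; the demo_* names and the waiter struct are illustrative only and are not part of the patch set:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"
#include "block/block-copy.h"

typedef struct DemoCopyWaiter {
    Coroutine *co;
    bool waiting;
} DemoCopyWaiter;

/* BlockCopyAsyncCallbackFunc: invoked once the async call has finished. */
static void coroutine_fn demo_copy_done(void *opaque)
{
    DemoCopyWaiter *w = opaque;

    if (w->waiting) {
        w->waiting = false;
        aio_co_wake(w->co);
    }
}

/* Copy @bytes from offset 0 according to the dirty bitmap, return status. */
static int coroutine_fn demo_copy_all(BlockCopyState *bcs, int64_t bytes)
{
    DemoCopyWaiter w = { .co = qemu_coroutine_self() };
    BlockCopyCallState *call;
    bool error_is_read;
    int ret;

    call = block_copy_async(bcs, 0, bytes,
                            64 /* max_workers */, 0 /* no max-chunk limit */,
                            demo_copy_done, &w);

    while (!block_copy_call_finished(call)) {
        w.waiting = true;
        qemu_coroutine_yield();     /* demo_copy_done() wakes us up */
    }

    ret = block_copy_call_status(call, &error_is_read);
    block_copy_call_free(call);
    return ret;
}

Real callers additionally handle cancellation, pause and rate limiting, as backup_loop() above shows.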

block/copy-on-read.c

@ -23,11 +23,26 @@
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "block/block_int.h" #include "block/block_int.h"
#include "qemu/module.h" #include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "block/copy-on-read.h"
typedef struct BDRVStateCOR {
bool active;
BlockDriverState *bottom_bs;
bool chain_frozen;
} BDRVStateCOR;
static int cor_open(BlockDriverState *bs, QDict *options, int flags, static int cor_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp) Error **errp)
{ {
BlockDriverState *bottom_bs = NULL;
BDRVStateCOR *state = bs->opaque;
/* Find a bottom node name, if any */
const char *bottom_node = qdict_get_try_str(options, "bottom");
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
false, errp); false, errp);
@ -35,6 +50,8 @@ static int cor_open(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL; return -EINVAL;
} }
bs->supported_read_flags = BDRV_REQ_PREFETCH;
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED | bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags); (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
@ -42,6 +59,44 @@ static int cor_open(BlockDriverState *bs, QDict *options, int flags,
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) & ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
bs->file->bs->supported_zero_flags); bs->file->bs->supported_zero_flags);
if (bottom_node) {
bottom_bs = bdrv_find_node(bottom_node);
if (!bottom_bs) {
error_setg(errp, "Bottom node '%s' not found", bottom_node);
qdict_del(options, "bottom");
return -EINVAL;
}
qdict_del(options, "bottom");
if (!bottom_bs->drv) {
error_setg(errp, "Bottom node '%s' not opened", bottom_node);
return -EINVAL;
}
if (bottom_bs->drv->is_filter) {
error_setg(errp, "Bottom node '%s' is a filter", bottom_node);
return -EINVAL;
}
if (bdrv_freeze_backing_chain(bs, bottom_bs, errp) < 0) {
return -EINVAL;
}
state->chain_frozen = true;
/*
* We do freeze the chain, so it shouldn't be removed. Still, storing a
* pointer worth bdrv_ref().
*/
bdrv_ref(bottom_bs);
}
state->active = true;
state->bottom_bs = bottom_bs;
/*
* We don't need to call bdrv_child_refresh_perms() now as the permissions
* will be updated later when the filter node gets its parent.
*/
return 0; return 0;
} }
@ -57,6 +112,17 @@ static void cor_child_perm(BlockDriverState *bs, BdrvChild *c,
uint64_t perm, uint64_t shared, uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared) uint64_t *nperm, uint64_t *nshared)
{ {
BDRVStateCOR *s = bs->opaque;
if (!s->active) {
/*
* While the filter is being removed
*/
*nperm = 0;
*nshared = BLK_PERM_ALL;
return;
}
*nperm = perm & PERM_PASSTHROUGH; *nperm = perm & PERM_PASSTHROUGH;
*nshared = (shared & PERM_PASSTHROUGH) | PERM_UNCHANGED; *nshared = (shared & PERM_PASSTHROUGH) | PERM_UNCHANGED;
@ -74,21 +140,67 @@ static int64_t cor_getlength(BlockDriverState *bs)
} }
static int coroutine_fn cor_co_preadv(BlockDriverState *bs, static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags) QEMUIOVector *qiov,
size_t qiov_offset,
int flags)
{ {
return bdrv_co_preadv(bs->file, offset, bytes, qiov, int64_t n;
flags | BDRV_REQ_COPY_ON_READ); int local_flags;
int ret;
BDRVStateCOR *state = bs->opaque;
if (!state->bottom_bs) {
return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset,
flags | BDRV_REQ_COPY_ON_READ);
}
while (bytes) {
local_flags = flags;
/* In case of failure, try to copy-on-read anyway */
ret = bdrv_is_allocated(bs->file->bs, offset, bytes, &n);
if (ret <= 0) {
ret = bdrv_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
state->bottom_bs, true, offset,
n, &n);
if (ret > 0 || ret < 0) {
local_flags |= BDRV_REQ_COPY_ON_READ;
}
/* Finish earlier if the end of a backing file has been reached */
if (n == 0) {
break;
}
}
/* Skip if neither read nor write are needed */
if ((local_flags & (BDRV_REQ_PREFETCH | BDRV_REQ_COPY_ON_READ)) !=
BDRV_REQ_PREFETCH) {
ret = bdrv_co_preadv_part(bs->file, offset, n, qiov, qiov_offset,
local_flags);
if (ret < 0) {
return ret;
}
}
offset += n;
qiov_offset += n;
bytes -= n;
}
return 0;
} }
static int coroutine_fn cor_co_pwritev(BlockDriverState *bs, static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, uint64_t offset,
QEMUIOVector *qiov, int flags) uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset, int flags)
{ {
return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset,
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); flags);
} }
@ -129,16 +241,31 @@ static void cor_lock_medium(BlockDriverState *bs, bool locked)
} }
static void cor_close(BlockDriverState *bs)
{
BDRVStateCOR *s = bs->opaque;
if (s->chain_frozen) {
s->chain_frozen = false;
bdrv_unfreeze_backing_chain(bs, s->bottom_bs);
}
bdrv_unref(s->bottom_bs);
}
static BlockDriver bdrv_copy_on_read = { static BlockDriver bdrv_copy_on_read = {
.format_name = "copy-on-read", .format_name = "copy-on-read",
.instance_size = sizeof(BDRVStateCOR),
.bdrv_open = cor_open, .bdrv_open = cor_open,
.bdrv_close = cor_close,
.bdrv_child_perm = cor_child_perm, .bdrv_child_perm = cor_child_perm,
.bdrv_getlength = cor_getlength, .bdrv_getlength = cor_getlength,
.bdrv_co_preadv = cor_co_preadv, .bdrv_co_preadv_part = cor_co_preadv_part,
.bdrv_co_pwritev = cor_co_pwritev, .bdrv_co_pwritev_part = cor_co_pwritev_part,
.bdrv_co_pwrite_zeroes = cor_co_pwrite_zeroes, .bdrv_co_pwrite_zeroes = cor_co_pwrite_zeroes,
.bdrv_co_pdiscard = cor_co_pdiscard, .bdrv_co_pdiscard = cor_co_pdiscard,
.bdrv_co_pwritev_compressed = cor_co_pwritev_compressed, .bdrv_co_pwritev_compressed = cor_co_pwritev_compressed,
@ -150,6 +277,39 @@ static BlockDriver bdrv_copy_on_read = {
.is_filter = true, .is_filter = true,
}; };
void bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs)
{
BdrvChild *child;
BlockDriverState *bs;
BDRVStateCOR *s = cor_filter_bs->opaque;
child = bdrv_filter_child(cor_filter_bs);
if (!child) {
return;
}
bs = child->bs;
/* Retain the BDS until we complete the graph change. */
bdrv_ref(bs);
/* Hold a guest back from writing while permissions are being reset. */
bdrv_drained_begin(bs);
/* Drop permissions before the graph change. */
s->active = false;
/* unfreeze, as otherwise bdrv_replace_node() will fail */
if (s->chain_frozen) {
s->chain_frozen = false;
bdrv_unfreeze_backing_chain(cor_filter_bs, s->bottom_bs);
}
bdrv_child_refresh_perms(cor_filter_bs, child, &error_abort);
bdrv_replace_node(cor_filter_bs, bs, &error_abort);
bdrv_drained_end(bs);
bdrv_unref(bs);
bdrv_unref(cor_filter_bs);
}
static void bdrv_copy_on_read_init(void) static void bdrv_copy_on_read_init(void)
{ {
bdrv_register(&bdrv_copy_on_read); bdrv_register(&bdrv_copy_on_read);

block/copy-on-read.h (new file)

@ -0,0 +1,32 @@
/*
* Copy-on-read filter block driver
*
* The filter driver performs Copy-On-Read (COR) operations
*
* Copyright (c) 2018-2020 Virtuozzo International GmbH.
*
* Author:
* Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef BLOCK_COPY_ON_READ
#define BLOCK_COPY_ON_READ
#include "block/block_int.h"
void bdrv_cor_filter_drop(BlockDriverState *cor_filter_bs);
#endif /* BLOCK_COPY_ON_READ */
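
Together with the bdrv_insert_node() helper added to block.c at the top of this series, this new header gives filter users an insert/remove pair. Below is a hypothetical, reduced sketch (the demo_* name is illustrative, not from the patch set) of how a copy-on-read filter with the new "bottom" option is inserted above a node, mirroring what block/stream.c does further down:

#include "qemu/osdep.h"
#include "qapi/qmp/qdict.h"
#include "block/block_int.h"
#include "block/copy-on-read.h"

/* Insert a copy-on-read filter above @bs, bounded below by @bottom. */
static BlockDriverState *demo_insert_cor(BlockDriverState *bs,
                                         BlockDriverState *bottom,
                                         Error **errp)
{
    QDict *opts = qdict_new();

    qdict_put_str(opts, "driver", "copy-on-read");
    qdict_put_str(opts, "file", bdrv_get_node_name(bs));
    qdict_put_str(opts, "bottom", bottom->node_name);

    /* bdrv_insert_node() opens the filter and replaces @bs with it. */
    return bdrv_insert_node(bs, opts, BDRV_O_RDWR, errp);
}

The filter is removed again with bdrv_cor_filter_drop(), which drops the filter's permissions, unfreezes the chain and replaces the filter node with its child (see block/copy-on-read.c above).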

block/file-posix.c

@ -216,6 +216,20 @@ typedef struct RawPosixAIOData {
static int cdrom_reopen(BlockDriverState *bs); static int cdrom_reopen(BlockDriverState *bs);
#endif #endif
/*
* Elide EAGAIN and EACCES details when failing to lock, as this
* indicates that the specified file region is already locked by
* another process, which is considered a common scenario.
*/
#define raw_lock_error_setg_errno(errp, err, fmt, ...) \
do { \
if ((err) == EAGAIN || (err) == EACCES) { \
error_setg((errp), (fmt), ## __VA_ARGS__); \
} else { \
error_setg_errno((errp), (err), (fmt), ## __VA_ARGS__); \
} \
} while (0)
#if defined(__NetBSD__) #if defined(__NetBSD__)
static int raw_normalize_devicepath(const char **filename, Error **errp) static int raw_normalize_devicepath(const char **filename, Error **errp)
{ {
@ -836,7 +850,8 @@ static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
if ((perm_lock_bits & bit) && !(locked_perm & bit)) { if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
ret = qemu_lock_fd(fd, off, 1, false); ret = qemu_lock_fd(fd, off, 1, false);
if (ret) { if (ret) {
error_setg(errp, "Failed to lock byte %d", off); raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
off);
return ret; return ret;
} else if (s) { } else if (s) {
s->locked_perm |= bit; s->locked_perm |= bit;
@ -844,7 +859,7 @@ static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
} else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) { } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
ret = qemu_unlock_fd(fd, off, 1); ret = qemu_unlock_fd(fd, off, 1);
if (ret) { if (ret) {
error_setg(errp, "Failed to unlock byte %d", off); error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
return ret; return ret;
} else if (s) { } else if (s) {
s->locked_perm &= ~bit; s->locked_perm &= ~bit;
@ -857,7 +872,8 @@ static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) { if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
ret = qemu_lock_fd(fd, off, 1, false); ret = qemu_lock_fd(fd, off, 1, false);
if (ret) { if (ret) {
error_setg(errp, "Failed to lock byte %d", off); raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
off);
return ret; return ret;
} else if (s) { } else if (s) {
s->locked_shared_perm |= bit; s->locked_shared_perm |= bit;
@ -866,7 +882,7 @@ static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
!(shared_perm_lock_bits & bit)) { !(shared_perm_lock_bits & bit)) {
ret = qemu_unlock_fd(fd, off, 1); ret = qemu_unlock_fd(fd, off, 1);
if (ret) { if (ret) {
error_setg(errp, "Failed to unlock byte %d", off); error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
return ret; return ret;
} else if (s) { } else if (s) {
s->locked_shared_perm &= ~bit; s->locked_shared_perm &= ~bit;
@ -890,9 +906,10 @@ static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
ret = qemu_lock_fd_test(fd, off, 1, true); ret = qemu_lock_fd_test(fd, off, 1, true);
if (ret) { if (ret) {
char *perm_name = bdrv_perm_names(p); char *perm_name = bdrv_perm_names(p);
error_setg(errp,
"Failed to get \"%s\" lock", raw_lock_error_setg_errno(errp, -ret,
perm_name); "Failed to get \"%s\" lock",
perm_name);
g_free(perm_name); g_free(perm_name);
return ret; return ret;
} }
@ -905,9 +922,10 @@ static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
ret = qemu_lock_fd_test(fd, off, 1, true); ret = qemu_lock_fd_test(fd, off, 1, true);
if (ret) { if (ret) {
char *perm_name = bdrv_perm_names(p); char *perm_name = bdrv_perm_names(p);
error_setg(errp,
"Failed to get shared \"%s\" lock", raw_lock_error_setg_errno(errp, -ret,
perm_name); "Failed to get shared \"%s\" lock",
perm_name);
g_free(perm_name); g_free(perm_name);
return ret; return ret;
} }
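
The raw_lock_error_setg_errno() macro above keeps the message terse for the common EAGAIN/EACCES case (another process holds the lock) and appends the errno string only for unexpected errors. A small, self-contained illustration of that decision, in plain C rather than QEMU code:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Mimic the message selection done by raw_lock_error_setg_errno(). */
static void report_lock_failure(int err, const char *what)
{
    if (err == EAGAIN || err == EACCES) {
        /* Lock held by another process: errno adds no useful detail. */
        fprintf(stderr, "Failed to get \"%s\" lock\n", what);
    } else {
        /* Unexpected error: include the errno string, e.g. for ENOLCK. */
        fprintf(stderr, "Failed to get \"%s\" lock: %s\n",
                what, strerror(err));
    }
}

int main(void)
{
    report_lock_failure(EAGAIN, "write");   /* terse message */
    report_lock_failure(ENOLCK, "write");   /* "...: No locks available" */
    return 0;
}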

block/io.c

@ -1453,6 +1453,9 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
if (flags & BDRV_REQ_COPY_ON_READ) { if (flags & BDRV_REQ_COPY_ON_READ) {
int64_t pnum; int64_t pnum;
/* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
flags &= ~BDRV_REQ_COPY_ON_READ;
ret = bdrv_is_allocated(bs, offset, bytes, &pnum); ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
if (ret < 0) { if (ret < 0) {
goto out; goto out;
@ -1474,9 +1477,11 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
goto out; goto out;
} }
assert(!(flags & ~bs->supported_read_flags));
max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
if (bytes <= max_bytes && bytes <= max_transfer) { if (bytes <= max_bytes && bytes <= max_transfer) {
ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0); ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
goto out; goto out;
} }
@ -1489,7 +1494,8 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
num, qiov, num, qiov,
qiov_offset + bytes - bytes_remaining, 0); qiov_offset + bytes - bytes_remaining,
flags);
max_bytes -= num; max_bytes -= num;
} else { } else {
num = bytes_remaining; num = bytes_remaining;

block/monitor/block-hmp-cmds.c

@ -507,9 +507,10 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict)
int64_t speed = qdict_get_try_int(qdict, "speed", 0); int64_t speed = qdict_get_try_int(qdict, "speed", 0);
qmp_block_stream(true, device, device, base != NULL, base, false, NULL, qmp_block_stream(true, device, device, base != NULL, base, false, NULL,
false, NULL, qdict_haskey(qdict, "speed"), speed, true, false, NULL, false, NULL,
BLOCKDEV_ON_ERROR_REPORT, false, false, false, false, qdict_haskey(qdict, "speed"), speed, true,
&error); BLOCKDEV_ON_ERROR_REPORT, false, NULL, false, false, false,
false, &error);
hmp_handle_error(mon, error); hmp_handle_error(mon, error);
} }

block/replication.c

@ -454,6 +454,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
int64_t active_length, hidden_length, disk_length; int64_t active_length, hidden_length, disk_length;
AioContext *aio_context; AioContext *aio_context;
Error *local_err = NULL; Error *local_err = NULL;
BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };
aio_context = bdrv_get_aio_context(bs); aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context); aio_context_acquire(aio_context);
@ -558,6 +559,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
s->backup_job = backup_job_create( s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs, NULL, s->secondary_disk->bs, s->hidden_disk->bs,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, NULL, 0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, NULL,
&perf,
BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL, BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
backup_job_completed, bs, NULL, &local_err); backup_job_completed, bs, NULL, &local_err);

block/stream.c

@ -17,8 +17,10 @@
#include "block/blockjob_int.h" #include "block/blockjob_int.h"
#include "qapi/error.h" #include "qapi/error.h"
#include "qapi/qmp/qerror.h" #include "qapi/qmp/qerror.h"
#include "qapi/qmp/qdict.h"
#include "qemu/ratelimit.h" #include "qemu/ratelimit.h"
#include "sysemu/block-backend.h" #include "sysemu/block-backend.h"
#include "block/copy-on-read.h"
enum { enum {
/* /*
@ -33,10 +35,11 @@ typedef struct StreamBlockJob {
BlockJob common; BlockJob common;
BlockDriverState *base_overlay; /* COW overlay (stream from this) */ BlockDriverState *base_overlay; /* COW overlay (stream from this) */
BlockDriverState *above_base; /* Node directly above the base */ BlockDriverState *above_base; /* Node directly above the base */
BlockDriverState *cor_filter_bs;
BlockDriverState *target_bs;
BlockdevOnError on_error; BlockdevOnError on_error;
char *backing_file_str; char *backing_file_str;
bool bs_read_only; bool bs_read_only;
bool chain_frozen;
} StreamBlockJob; } StreamBlockJob;
static int coroutine_fn stream_populate(BlockBackend *blk, static int coroutine_fn stream_populate(BlockBackend *blk,
@ -44,39 +47,28 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
{ {
assert(bytes < SIZE_MAX); assert(bytes < SIZE_MAX);
return blk_co_preadv(blk, offset, bytes, NULL, return blk_co_preadv(blk, offset, bytes, NULL, BDRV_REQ_PREFETCH);
BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
}
static void stream_abort(Job *job)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
if (s->chain_frozen) {
BlockJob *bjob = &s->common;
bdrv_unfreeze_backing_chain(blk_bs(bjob->blk), s->above_base);
}
} }
static int stream_prepare(Job *job) static int stream_prepare(Job *job)
{ {
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockJob *bjob = &s->common; BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
BlockDriverState *bs = blk_bs(bjob->blk);
BlockDriverState *unfiltered_bs = bdrv_skip_filters(bs);
BlockDriverState *base = bdrv_filter_or_cow_bs(s->above_base); BlockDriverState *base = bdrv_filter_or_cow_bs(s->above_base);
BlockDriverState *unfiltered_base = bdrv_skip_filters(base);
Error *local_err = NULL; Error *local_err = NULL;
int ret = 0; int ret = 0;
bdrv_unfreeze_backing_chain(bs, s->above_base); /* We should drop filter at this point, as filter hold the backing chain */
s->chain_frozen = false; bdrv_cor_filter_drop(s->cor_filter_bs);
s->cor_filter_bs = NULL;
if (bdrv_cow_child(unfiltered_bs)) { if (bdrv_cow_child(unfiltered_bs)) {
const char *base_id = NULL, *base_fmt = NULL; const char *base_id = NULL, *base_fmt = NULL;
if (base) { if (unfiltered_base) {
base_id = s->backing_file_str; base_id = s->backing_file_str ?: unfiltered_base->filename;
if (base->drv) { if (unfiltered_base->drv) {
base_fmt = base->drv->format_name; base_fmt = unfiltered_base->drv->format_name;
} }
} }
bdrv_set_backing_hd(unfiltered_bs, base, &local_err); bdrv_set_backing_hd(unfiltered_bs, base, &local_err);
@ -94,13 +86,17 @@ static void stream_clean(Job *job)
{ {
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockJob *bjob = &s->common; BlockJob *bjob = &s->common;
BlockDriverState *bs = blk_bs(bjob->blk);
if (s->cor_filter_bs) {
bdrv_cor_filter_drop(s->cor_filter_bs);
s->cor_filter_bs = NULL;
}
/* Reopen the image back in read-only mode if necessary */ /* Reopen the image back in read-only mode if necessary */
if (s->bs_read_only) { if (s->bs_read_only) {
/* Give up write permissions before making it read-only */ /* Give up write permissions before making it read-only */
blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
bdrv_reopen_set_read_only(bs, true, NULL); bdrv_reopen_set_read_only(s->target_bs, true, NULL);
} }
g_free(s->backing_file_str); g_free(s->backing_file_str);
@ -110,9 +106,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
{ {
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockBackend *blk = s->common.blk; BlockBackend *blk = s->common.blk;
BlockDriverState *bs = blk_bs(blk); BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
BlockDriverState *unfiltered_bs = bdrv_skip_filters(bs);
bool enable_cor = !bdrv_cow_child(s->base_overlay);
int64_t len; int64_t len;
int64_t offset = 0; int64_t offset = 0;
uint64_t delay_ns = 0; uint64_t delay_ns = 0;
@ -124,21 +118,12 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
return 0; return 0;
} }
len = bdrv_getlength(bs); len = bdrv_getlength(s->target_bs);
if (len < 0) { if (len < 0) {
return len; return len;
} }
job_progress_set_remaining(&s->common.job, len); job_progress_set_remaining(&s->common.job, len);
/* Turn on copy-on-read for the whole block device so that guest read
* requests help us make progress. Only do this when copying the entire
* backing chain since the copy-on-read operation does not take base into
* account.
*/
if (enable_cor) {
bdrv_enable_copy_on_read(bs);
}
for ( ; offset < len; offset += n) { for ( ; offset < len; offset += n) {
bool copy; bool copy;
int ret; int ret;
@ -197,10 +182,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
} }
} }
if (enable_cor) {
bdrv_disable_copy_on_read(bs);
}
/* Do not remove the backing file if an error was there but ignored. */ /* Do not remove the backing file if an error was there but ignored. */
return error; return error;
} }
@ -212,7 +193,6 @@ static const BlockJobDriver stream_job_driver = {
.free = block_job_free, .free = block_job_free,
.run = stream_run, .run = stream_run,
.prepare = stream_prepare, .prepare = stream_prepare,
.abort = stream_abort,
.clean = stream_clean, .clean = stream_clean,
.user_resume = block_job_user_resume, .user_resume = block_job_user_resume,
}, },
@ -220,59 +200,113 @@ static const BlockJobDriver stream_job_driver = {
void stream_start(const char *job_id, BlockDriverState *bs, void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str, BlockDriverState *base, const char *backing_file_str,
BlockDriverState *bottom,
int creation_flags, int64_t speed, int creation_flags, int64_t speed,
BlockdevOnError on_error, Error **errp) BlockdevOnError on_error,
const char *filter_node_name,
Error **errp)
{ {
StreamBlockJob *s; StreamBlockJob *s;
BlockDriverState *iter; BlockDriverState *iter;
bool bs_read_only; bool bs_read_only;
int basic_flags = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; int basic_flags = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
BlockDriverState *base_overlay = bdrv_find_overlay(bs, base); BlockDriverState *base_overlay;
BlockDriverState *cor_filter_bs = NULL;
BlockDriverState *above_base; BlockDriverState *above_base;
QDict *opts;
if (!base_overlay) { assert(!(base && bottom));
error_setg(errp, "'%s' is not in the backing chain of '%s'", assert(!(backing_file_str && bottom));
base->node_name, bs->node_name);
return;
}
/* if (bottom) {
* Find the node directly above @base. @base_overlay is a COW overlay, so /*
* it must have a bdrv_cow_child(), but it is the immediate overlay of * New simple interface. The code is written in terms of old interface
* @base, so between the two there can only be filters. * with @base parameter (still, it doesn't freeze link to base, so in
*/ * this mean old code is correct for new interface). So, for now, just
above_base = base_overlay; * emulate base_overlay and above_base. Still, when old interface
if (bdrv_cow_bs(above_base) != base) { * finally removed, we should refactor code to use only "bottom", but
above_base = bdrv_cow_bs(above_base); * not "*base*" things.
while (bdrv_filter_bs(above_base) != base) { */
above_base = bdrv_filter_bs(above_base); assert(!bottom->drv->is_filter);
base_overlay = above_base = bottom;
} else {
base_overlay = bdrv_find_overlay(bs, base);
if (!base_overlay) {
error_setg(errp, "'%s' is not in the backing chain of '%s'",
base->node_name, bs->node_name);
return;
} }
}
if (bdrv_freeze_backing_chain(bs, above_base, errp) < 0) { /*
return; * Find the node directly above @base. @base_overlay is a COW overlay,
* so it must have a bdrv_cow_child(), but it is the immediate overlay
* of @base, so between the two there can only be filters.
*/
above_base = base_overlay;
if (bdrv_cow_bs(above_base) != base) {
above_base = bdrv_cow_bs(above_base);
while (bdrv_filter_bs(above_base) != base) {
above_base = bdrv_filter_bs(above_base);
}
}
} }
/* Make sure that the image is opened in read-write mode */ /* Make sure that the image is opened in read-write mode */
bs_read_only = bdrv_is_read_only(bs); bs_read_only = bdrv_is_read_only(bs);
if (bs_read_only) { if (bs_read_only) {
if (bdrv_reopen_set_read_only(bs, false, errp) != 0) { int ret;
bs_read_only = false; /* Hold the chain during reopen */
goto fail; if (bdrv_freeze_backing_chain(bs, above_base, errp) < 0) {
return;
}
ret = bdrv_reopen_set_read_only(bs, false, errp);
/* failure, or cor-filter will hold the chain */
bdrv_unfreeze_backing_chain(bs, above_base);
if (ret < 0) {
return;
} }
} }
/* Prevent concurrent jobs trying to modify the graph structure here, we opts = qdict_new();
* already have our own plans. Also don't allow resize as the image size is
* queried only at the job start and then cached. */ qdict_put_str(opts, "driver", "copy-on-read");
s = block_job_create(job_id, &stream_job_driver, NULL, bs, qdict_put_str(opts, "file", bdrv_get_node_name(bs));
basic_flags | BLK_PERM_GRAPH_MOD, /* Pass the base_overlay node name as 'bottom' to COR driver */
qdict_put_str(opts, "bottom", base_overlay->node_name);
if (filter_node_name) {
qdict_put_str(opts, "node-name", filter_node_name);
}
cor_filter_bs = bdrv_insert_node(bs, opts, BDRV_O_RDWR, errp);
if (!cor_filter_bs) {
goto fail;
}
if (!filter_node_name) {
cor_filter_bs->implicit = true;
}
s = block_job_create(job_id, &stream_job_driver, NULL, cor_filter_bs,
BLK_PERM_CONSISTENT_READ,
basic_flags | BLK_PERM_WRITE, basic_flags | BLK_PERM_WRITE,
speed, creation_flags, NULL, NULL, errp); speed, creation_flags, NULL, NULL, errp);
if (!s) { if (!s) {
goto fail; goto fail;
} }
/*
* Prevent concurrent jobs trying to modify the graph structure here, we
* already have our own plans. Also don't allow resize as the image size is
* queried only at the job start and then cached.
*/
if (block_job_add_bdrv(&s->common, "active node", bs, 0,
basic_flags | BLK_PERM_WRITE, &error_abort)) {
goto fail;
}
/* Block all intermediate nodes between bs and base, because they will /* Block all intermediate nodes between bs and base, because they will
* disappear from the chain after this operation. The streaming job reads * disappear from the chain after this operation. The streaming job reads
* every block only once, assuming that it doesn't change, so forbid writes * every block only once, assuming that it doesn't change, so forbid writes
@ -293,8 +327,9 @@ void stream_start(const char *job_id, BlockDriverState *bs,
s->base_overlay = base_overlay; s->base_overlay = base_overlay;
s->above_base = above_base; s->above_base = above_base;
s->backing_file_str = g_strdup(backing_file_str); s->backing_file_str = g_strdup(backing_file_str);
s->cor_filter_bs = cor_filter_bs;
s->target_bs = bs;
s->bs_read_only = bs_read_only; s->bs_read_only = bs_read_only;
s->chain_frozen = true;
s->on_error = on_error; s->on_error = on_error;
trace_stream_start(bs, base, s); trace_stream_start(bs, base, s);
@ -302,8 +337,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
return; return;
fail: fail:
if (cor_filter_bs) {
bdrv_cor_filter_drop(cor_filter_bs);
}
if (bs_read_only) { if (bs_read_only) {
bdrv_reopen_set_read_only(bs, true, NULL); bdrv_reopen_set_read_only(bs, true, NULL);
} }
bdrv_unfreeze_backing_chain(bs, above_base);
} }

blockdev.c

@ -2500,19 +2500,39 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
bool has_base, const char *base, bool has_base, const char *base,
bool has_base_node, const char *base_node, bool has_base_node, const char *base_node,
bool has_backing_file, const char *backing_file, bool has_backing_file, const char *backing_file,
bool has_bottom, const char *bottom,
bool has_speed, int64_t speed, bool has_speed, int64_t speed,
bool has_on_error, BlockdevOnError on_error, bool has_on_error, BlockdevOnError on_error,
bool has_filter_node_name, const char *filter_node_name,
bool has_auto_finalize, bool auto_finalize, bool has_auto_finalize, bool auto_finalize,
bool has_auto_dismiss, bool auto_dismiss, bool has_auto_dismiss, bool auto_dismiss,
Error **errp) Error **errp)
{ {
BlockDriverState *bs, *iter; BlockDriverState *bs, *iter, *iter_end;
BlockDriverState *base_bs = NULL; BlockDriverState *base_bs = NULL;
BlockDriverState *bottom_bs = NULL;
AioContext *aio_context; AioContext *aio_context;
Error *local_err = NULL; Error *local_err = NULL;
const char *base_name = NULL;
int job_flags = JOB_DEFAULT; int job_flags = JOB_DEFAULT;
if (has_base && has_base_node) {
error_setg(errp, "'base' and 'base-node' cannot be specified "
"at the same time");
return;
}
if (has_base && has_bottom) {
error_setg(errp, "'base' and 'bottom' cannot be specified "
"at the same time");
return;
}
if (has_bottom && has_base_node) {
error_setg(errp, "'bottom' and 'base-node' cannot be specified "
"at the same time");
return;
}
if (!has_on_error) { if (!has_on_error) {
on_error = BLOCKDEV_ON_ERROR_REPORT; on_error = BLOCKDEV_ON_ERROR_REPORT;
} }
@ -2525,12 +2545,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
aio_context = bdrv_get_aio_context(bs); aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context); aio_context_acquire(aio_context);
if (has_base && has_base_node) {
error_setg(errp, "'base' and 'base-node' cannot be specified "
"at the same time");
goto out;
}
if (has_base) { if (has_base) {
base_bs = bdrv_find_backing_image(bs, base); base_bs = bdrv_find_backing_image(bs, base);
if (base_bs == NULL) { if (base_bs == NULL) {
@ -2538,7 +2552,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
goto out; goto out;
} }
assert(bdrv_get_aio_context(base_bs) == aio_context); assert(bdrv_get_aio_context(base_bs) == aio_context);
base_name = base;
} }
if (has_base_node) { if (has_base_node) {
@ -2553,11 +2566,35 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
} }
assert(bdrv_get_aio_context(base_bs) == aio_context); assert(bdrv_get_aio_context(base_bs) == aio_context);
bdrv_refresh_filename(base_bs); bdrv_refresh_filename(base_bs);
base_name = base_bs->filename;
} }
/* Check for op blockers in the whole chain between bs and base */ if (has_bottom) {
for (iter = bs; iter && iter != base_bs; bottom_bs = bdrv_lookup_bs(NULL, bottom, errp);
if (!bottom_bs) {
goto out;
}
if (!bottom_bs->drv) {
error_setg(errp, "Node '%s' is not open", bottom);
goto out;
}
if (bottom_bs->drv->is_filter) {
error_setg(errp, "Node '%s' is a filter, use a non-filter node "
"as 'bottom'", bottom);
goto out;
}
if (!bdrv_chain_contains(bs, bottom_bs)) {
error_setg(errp, "Node '%s' is not in a chain starting from '%s'",
bottom, device);
goto out;
}
assert(bdrv_get_aio_context(bottom_bs) == aio_context);
}
/*
* Check for op blockers in the whole chain between bs and base (or bottom)
*/
iter_end = has_bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
for (iter = bs; iter && iter != iter_end;
iter = bdrv_filter_or_cow_bs(iter)) iter = bdrv_filter_or_cow_bs(iter))
{ {
if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) { if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
@ -2573,9 +2610,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
goto out; goto out;
} }
/* backing_file string overrides base bs filename */
base_name = has_backing_file ? backing_file : base_name;
if (has_auto_finalize && !auto_finalize) { if (has_auto_finalize && !auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE; job_flags |= JOB_MANUAL_FINALIZE;
} }
@ -2583,8 +2617,9 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
job_flags |= JOB_MANUAL_DISMISS; job_flags |= JOB_MANUAL_DISMISS;
} }
stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name, stream_start(has_job_id ? job_id : NULL, bs, base_bs, backing_file,
job_flags, has_speed ? speed : 0, on_error, &local_err); bottom_bs, job_flags, has_speed ? speed : 0, on_error,
filter_node_name, &local_err);
if (local_err) { if (local_err) {
error_propagate(errp, local_err); error_propagate(errp, local_err);
goto out; goto out;
@ -2794,6 +2829,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
{ {
BlockJob *job = NULL; BlockJob *job = NULL;
BdrvDirtyBitmap *bmap = NULL; BdrvDirtyBitmap *bmap = NULL;
BackupPerf perf = { .max_workers = 64 };
int job_flags = JOB_DEFAULT; int job_flags = JOB_DEFAULT;
if (!backup->has_speed) { if (!backup->has_speed) {
@ -2818,6 +2854,18 @@ static BlockJob *do_backup_common(BackupCommon *backup,
backup->compress = false; backup->compress = false;
} }
if (backup->x_perf) {
if (backup->x_perf->has_use_copy_range) {
perf.use_copy_range = backup->x_perf->use_copy_range;
}
if (backup->x_perf->has_max_workers) {
perf.max_workers = backup->x_perf->max_workers;
}
if (backup->x_perf->has_max_chunk) {
perf.max_chunk = backup->x_perf->max_chunk;
}
}
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) || if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
(backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) { (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
/* done before desugaring 'incremental' to print the right message */ /* done before desugaring 'incremental' to print the right message */
@ -2891,6 +2939,7 @@ static BlockJob *do_backup_common(BackupCommon *backup,
backup->sync, bmap, backup->bitmap_mode, backup->sync, bmap, backup->bitmap_mode,
backup->compress, backup->compress,
backup->filter_node_name, backup->filter_node_name,
&perf,
backup->on_source_error, backup->on_source_error,
backup->on_target_error, backup->on_target_error,
job_flags, NULL, NULL, txn, errp); job_flags, NULL, NULL, txn, errp);

View File

@ -256,6 +256,7 @@ static bool job_timer_pending(Job *job)
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{ {
const BlockJobDriver *drv = block_job_driver(job);
int64_t old_speed = job->speed; int64_t old_speed = job->speed;
if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) { if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
@ -270,6 +271,11 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME); ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);
job->speed = speed; job->speed = speed;
if (drv->set_speed) {
drv->set_speed(job, speed);
}
if (speed && speed <= old_speed) { if (speed && speed <= old_speed) {
return; return;
} }

View File

@ -18,19 +18,15 @@
#include "block/block.h" #include "block/block.h"
#include "qemu/co-shared-resource.h" #include "qemu/co-shared-resource.h"
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque); typedef void (*BlockCopyAsyncCallbackFunc)(void *opaque);
typedef struct BlockCopyState BlockCopyState; typedef struct BlockCopyState BlockCopyState;
typedef struct BlockCopyCallState BlockCopyCallState;
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
int64_t cluster_size, int64_t cluster_size, bool use_copy_range,
BdrvRequestFlags write_flags, BdrvRequestFlags write_flags,
Error **errp); Error **errp);
void block_copy_set_progress_callback(
BlockCopyState *s,
ProgressBytesCallbackFunc progress_bytes_callback,
void *progress_opaque);
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm); void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
void block_copy_state_free(BlockCopyState *s); void block_copy_state_free(BlockCopyState *s);
@ -39,7 +35,56 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
int64_t offset, int64_t *count); int64_t offset, int64_t *count);
int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
bool *error_is_read); bool ignore_ratelimit);
/*
* Run block-copy in a coroutine, create corresponding BlockCopyCallState
* object and return pointer to it. Never returns NULL.
*
* The caller is responsible for calling block_copy_call_free() to free the
* BlockCopyCallState object.
*
* @max_workers means maximum of parallel coroutines to execute sub-requests,
* must be > 0.
*
* @max_chunk means maximum length for one IO operation. Zero means unlimited.
*/
BlockCopyCallState *block_copy_async(BlockCopyState *s,
int64_t offset, int64_t bytes,
int max_workers, int64_t max_chunk,
BlockCopyAsyncCallbackFunc cb,
void *cb_opaque);
/*
* Free a finished BlockCopyCallState. Trying to free a running
* block-copy call will crash.
*/
void block_copy_call_free(BlockCopyCallState *call_state);
/*
* Note that the block-copy call is marked finished prior to calling
* the callback.
*/
bool block_copy_call_finished(BlockCopyCallState *call_state);
bool block_copy_call_succeeded(BlockCopyCallState *call_state);
bool block_copy_call_failed(BlockCopyCallState *call_state);
bool block_copy_call_cancelled(BlockCopyCallState *call_state);
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read);
void block_copy_set_speed(BlockCopyState *s, uint64_t speed);
void block_copy_kick(BlockCopyCallState *call_state);
/*
* Cancel a running block-copy call.
*
* Cancelling leaves the block-copy state valid: dirty bits are correct, and you
* may use cancel + <run block_copy with the same parameters> to emulate
* pause/resume.
*
* Note also that cancelling is asynchronous: it only marks the block-copy call
* to be cancelled. So the call may be cancelled (block_copy_call_cancelled()
* reports true) but not yet finished (block_copy_call_finished() reports
* false).
*/
void block_copy_call_cancel(BlockCopyCallState *call_state);
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s); BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip); void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);

View File

@ -72,9 +72,11 @@ typedef enum {
BDRV_REQ_NO_FALLBACK = 0x100, BDRV_REQ_NO_FALLBACK = 0x100,
/* /*
* BDRV_REQ_PREFETCH may be used only together with BDRV_REQ_COPY_ON_READ * BDRV_REQ_PREFETCH makes sense only in the context of copy-on-read
* on read request and means that caller doesn't really need data to be * (i.e., together with the BDRV_REQ_COPY_ON_READ flag or when a COR
* written to qiov parameter which may be NULL. * filter is involved), in which case it signals that the COR operation
* need not read the data into memory (qiov) but only ensure they are
* copied to the top layer (i.e., that COR operation is done).
*/ */
BDRV_REQ_PREFETCH = 0x200, BDRV_REQ_PREFETCH = 0x200,
@ -358,6 +360,8 @@ void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
Error **errp); Error **errp);
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
Error **errp); Error **errp);
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options,
int flags, Error **errp);
int bdrv_parse_aio(const char *mode, int *flags); int bdrv_parse_aio(const char *mode, int *flags);
int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough); int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);

View File

@ -881,6 +881,10 @@ struct BlockDriverState {
/* I/O Limits */ /* I/O Limits */
BlockLimits bl; BlockLimits bl;
/*
* Flags honored during pread
*/
unsigned int supported_read_flags;
/* Flags honored during pwrite (so far: BDRV_REQ_FUA, /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
* BDRV_REQ_WRITE_UNCHANGED). * BDRV_REQ_WRITE_UNCHANGED).
* If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
@ -1143,6 +1147,9 @@ int is_windows_drive(const char *filename);
* See @BlockJobCreateFlags * See @BlockJobCreateFlags
* @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @on_error: The action to take upon error. * @on_error: The action to take upon error.
* @filter_node_name: The node name that should be assigned to the filter
* driver that the stream job inserts into the graph above
* @bs. NULL means that a node name should be autogenerated.
* @errp: Error object. * @errp: Error object.
* *
* Start a streaming operation on @bs. Clusters that are unallocated * Start a streaming operation on @bs. Clusters that are unallocated
@ -1154,8 +1161,11 @@ int is_windows_drive(const char *filename);
*/ */
void stream_start(const char *job_id, BlockDriverState *bs, void stream_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, const char *backing_file_str, BlockDriverState *base, const char *backing_file_str,
BlockDriverState *bottom,
int creation_flags, int64_t speed, int creation_flags, int64_t speed,
BlockdevOnError on_error, Error **errp); BlockdevOnError on_error,
const char *filter_node_name,
Error **errp);
/** /**
* commit_start: * commit_start:
@ -1256,6 +1266,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
* @sync_mode: What parts of the disk image should be copied to the destination. * @sync_mode: What parts of the disk image should be copied to the destination.
* @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental' * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental'
* @bitmap_mode: The bitmap synchronization policy to use. * @bitmap_mode: The bitmap synchronization policy to use.
* @perf: Performance options. All actual fields are assumed to be present;
* all ".has_*" fields are ignored.
* @on_source_error: The action to take upon error reading from the source. * @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target. * @on_target_error: The action to take upon error writing to the target.
* @creation_flags: Flags that control the behavior of the Job lifetime. * @creation_flags: Flags that control the behavior of the Job lifetime.
@ -1274,6 +1286,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BitmapSyncMode bitmap_mode, BitmapSyncMode bitmap_mode,
bool compress, bool compress,
const char *filter_node_name, const char *filter_node_name,
BackupPerf *perf,
BlockdevOnError on_source_error, BlockdevOnError on_source_error,
BlockdevOnError on_target_error, BlockdevOnError on_target_error,
int creation_flags, int creation_flags,

View File

@ -52,6 +52,8 @@ struct BlockJobDriver {
* besides job->blk to the new AioContext. * besides job->blk to the new AioContext.
*/ */
void (*attached_aio_context)(BlockJob *job, AioContext *new_context); void (*attached_aio_context)(BlockJob *job, AioContext *new_context);
void (*set_speed)(BlockJob *job, int64_t speed);
}; };
/** /**

3
job.c
View File

@ -553,6 +553,9 @@ static bool job_timer_not_pending(Job *job)
void job_pause(Job *job) void job_pause(Job *job)
{ {
job->pause_count++; job->pause_count++;
if (!job->paused) {
job_enter(job);
}
} }
void job_resume(Job *job) void job_resume(Job *job)

View File

@ -1371,6 +1371,30 @@
{ 'struct': 'BlockdevSnapshot', { 'struct': 'BlockdevSnapshot',
'data': { 'node': 'str', 'overlay': 'str' } } 'data': { 'node': 'str', 'overlay': 'str' } }
##
# @BackupPerf:
#
# Optional parameters for backup. These parameters don't affect
# functionality, but may significantly affect performance.
#
# @use-copy-range: Use copy offloading. Default false.
#
# @max-workers: Maximum number of parallel requests for the sustained background
# copying process. Doesn't influence copy-before-write operations.
# Default 64.
#
# @max-chunk: Maximum request length for the sustained background copying
# process. Doesn't influence copy-before-write operations.
# 0 means unlimited. If max-chunk is non-zero, it should not be
# less than the job cluster size, which is calculated as the maximum of
# the target image cluster size and 64k. Default 0.
#
# Since: 6.0
##
{ 'struct': 'BackupPerf',
'data': { '*use-copy-range': 'bool',
'*max-workers': 'int', '*max-chunk': 'int64' } }
## ##
# @BackupCommon: # @BackupCommon:
# #
@ -1426,6 +1450,8 @@
# above node specified by @drive. If this option is not given, # above node specified by @drive. If this option is not given,
# a node name is autogenerated. (Since: 4.2) # a node name is autogenerated. (Since: 4.2)
# #
# @x-perf: Performance options. (Since 6.0)
#
# Note: @on-source-error and @on-target-error only affect background # Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's # I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used. # rerror/werror actions will be used.
@ -1440,7 +1466,7 @@
'*on-source-error': 'BlockdevOnError', '*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError', '*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool', '*auto-finalize': 'bool', '*auto-dismiss': 'bool',
'*filter-node-name': 'str' } } '*filter-node-name': 'str', '*x-perf': 'BackupPerf' } }
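For illustration only, here is a minimal iotests-style sketch of driving a backup job with the new @x-perf member. The node names, image setup, and tuning values are assumptions made up for this example, not part of the series:

    import iotests

    vm = iotests.VM()
    vm.launch()
    # 'src' and 'tgt' are hypothetical node names assumed to have been added
    # with blockdev-add beforehand; only the x-perf usage matters here.
    result = vm.qmp('blockdev-backup',
                    job_id='backup0', device='src', target='tgt', sync='full',
                    x_perf={'use-copy-range': False,
                            'max-workers': 16,
                            'max-chunk': 1024 * 1024})
    # The QMP wrapper converts the x_perf keyword to 'x-perf' on the wire;
    # nested keys are passed through as written.
    vm.shutdown()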
## ##
# @DriveBackup: # @DriveBackup:
@ -2517,10 +2543,14 @@
# @device: the device or node name of the top image # @device: the device or node name of the top image
# #
# @base: the common backing file name. # @base: the common backing file name.
# It cannot be set if @base-node is also set. # It cannot be set if @base-node or @bottom is also set.
# #
# @base-node: the node name of the backing file. # @base-node: the node name of the backing file.
# It cannot be set if @base is also set. (Since 2.8) # It cannot be set if @base or @bottom is also set. (Since 2.8)
#
# @bottom: the last node in the chain that should be streamed into
# top. It cannot be set if @base or @base-node is also set.
# It cannot be a filter node. (Since 6.0)
# #
# @backing-file: The backing file string to write into the top # @backing-file: The backing file string to write into the top
# image. This filename is not validated. # image. This filename is not validated.
@ -2543,6 +2573,11 @@
# 'stop' and 'enospc' can only be used if the block device # 'stop' and 'enospc' can only be used if the block device
# supports io-status (see BlockInfo). Since 1.3. # supports io-status (see BlockInfo). Since 1.3.
# #
# @filter-node-name: the node name that should be assigned to the
# filter driver that the stream job inserts into the graph
# above @device. If this option is not given, a node name is
# autogenerated. (Since: 6.0)
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has # @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before # finished its work, waiting for @block-job-finalize before
# making any block graph changes. # making any block graph changes.
@ -2571,8 +2606,9 @@
## ##
{ 'command': 'block-stream', { 'command': 'block-stream',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', 'data': { '*job-id': 'str', 'device': 'str', '*base': 'str',
'*base-node': 'str', '*backing-file': 'str', '*speed': 'int', '*base-node': 'str', '*backing-file': 'str', '*bottom': 'str',
'*on-error': 'BlockdevOnError', '*speed': 'int', '*on-error': 'BlockdevOnError',
'*filter-node-name': 'str',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } } '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
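As a sketch of the new block-stream parameters (the node names are invented for illustration), an iotests-style caller can bound the stream with @bottom and give the inserted copy-on-read filter a fixed node name:

    import iotests

    vm = iotests.VM()
    vm.launch()
    # Hypothetical backing chain top -> mid -> base: data allocated in 'mid'
    # is copied up into 'top', data below 'mid' stays in the backing chain,
    # and the job's COR filter is named 'stream-filter'.
    result = vm.qmp('block-stream',
                    job_id='stream0', device='top', bottom='mid',
                    filter_node_name='stream-filter')
    # filter_node_name is sent as 'filter-node-name' by the QMP wrapper.
    vm.shutdown()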
## ##
@ -3953,6 +3989,24 @@
'data': { 'throttle-group': 'str', 'data': { 'throttle-group': 'str',
'file' : 'BlockdevRef' 'file' : 'BlockdevRef'
} } } }
##
# @BlockdevOptionsCor:
#
# Driver specific block device options for the copy-on-read driver.
#
# @bottom: The name of a non-filter node (allocation-bearing layer) that
# limits the COR operations in the backing chain (inclusive), so
# that no data below this node will be copied by this filter.
# If the option is absent, the limit is not applied, so that data
# from all backing layers may be copied.
#
# Since: 6.0
##
{ 'struct': 'BlockdevOptionsCor',
'base': 'BlockdevOptionsGenericFormat',
'data': { '*bottom': 'str' } }
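A sketch of how the new @bottom option might look in a blockdev-add call. The file name and node names are made up; 'base' is assumed to be the node-name of a node already present in the image's backing chain:

    import iotests

    vm = iotests.VM()
    vm.launch()
    # Insert a copy-on-read filter above a qcow2 node; COR stops at the
    # hypothetical backing node 'base', so data below 'base' is read through
    # but never copied up by this filter.
    result = vm.qmp('blockdev-add', **{
        'node-name': 'cor',
        'driver': 'copy-on-read',
        'bottom': 'base',
        'file': {'driver': 'qcow2',
                 'file': {'driver': 'file', 'filename': 'top.qcow2'}}
    })
    vm.shutdown()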
## ##
# @BlockdevOptions: # @BlockdevOptions:
# #
@ -4005,7 +4059,7 @@
'bochs': 'BlockdevOptionsGenericFormat', 'bochs': 'BlockdevOptionsGenericFormat',
'cloop': 'BlockdevOptionsGenericFormat', 'cloop': 'BlockdevOptionsGenericFormat',
'compress': 'BlockdevOptionsGenericFormat', 'compress': 'BlockdevOptionsGenericFormat',
'copy-on-read':'BlockdevOptionsGenericFormat', 'copy-on-read':'BlockdevOptionsCor',
'dmg': 'BlockdevOptionsGenericFormat', 'dmg': 'BlockdevOptionsGenericFormat',
'file': 'BlockdevOptionsFile', 'file': 'BlockdevOptionsFile',
'ftp': 'BlockdevOptionsCurlFtp', 'ftp': 'BlockdevOptionsCurlFtp',

View File

@ -0,0 +1,167 @@
#!/usr/bin/env python3
#
# Bench backup block-job
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import json
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
def bench_func(env, case):
""" Handle one "cell" of benchmarking table. """
cmd_options = env['cmd-options'] if 'cmd-options' in env else {}
return bench_block_copy(env['qemu-binary'], env['cmd'],
cmd_options,
case['source'], case['target'])
def bench(args):
test_cases = []
sources = {}
targets = {}
for d in args.dir:
label, path = d.split(':') # paths with colon not supported
sources[label] = drv_file(path + '/test-source')
targets[label] = drv_file(path + '/test-target')
if args.nbd:
nbd = args.nbd.split(':')
host = nbd[0]
port = '10809' if len(nbd) == 1 else nbd[1]
drv = drv_nbd(host, port)
sources['nbd'] = drv
targets['nbd'] = drv
for t in args.test:
src, dst = t.split(':')
test_cases.append({
'id': t,
'source': sources[src],
'target': targets[dst]
})
binaries = [] # list of (<label>, <path>, [<options>])
for i, q in enumerate(args.env):
name_path = q.split(':')
if len(name_path) == 1:
label = f'q{i}'
path_opts = name_path[0].split(',')
else:
assert len(name_path) == 2 # paths with colon not supported
label = name_path[0]
path_opts = name_path[1].split(',')
binaries.append((label, path_opts[0], path_opts[1:]))
test_envs = []
bin_paths = {}
for i, q in enumerate(args.env):
opts = q.split(',')
label_path = opts[0]
opts = opts[1:]
if ':' in label_path:
# path with colon inside is not supported
label, path = label_path.split(':')
bin_paths[label] = path
elif label_path in bin_paths:
label = label_path
path = bin_paths[label]
else:
path = label_path
label = f'q{i}'
bin_paths[label] = path
x_perf = {}
is_mirror = False
for opt in opts:
if opt == 'mirror':
is_mirror = True
elif opt == 'copy-range=on':
x_perf['use-copy-range'] = True
elif opt == 'copy-range=off':
x_perf['use-copy-range'] = False
elif opt.startswith('max-workers='):
x_perf['max-workers'] = int(opt.split('=')[1])
if is_mirror:
assert not x_perf
test_envs.append({
'id': f'mirror({label})',
'cmd': 'blockdev-mirror',
'qemu-binary': path
})
else:
test_envs.append({
'id': f'backup({label})\n' + '\n'.join(opts),
'cmd': 'blockdev-backup',
'cmd-options': {'x-perf': x_perf} if x_perf else {},
'qemu-binary': path
})
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
with open('results.json', 'w') as f:
json.dump(result, f, indent=4)
print(results_to_text(result))
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest) or []
items.extend(values)
setattr(namespace, self.dest, items)
if __name__ == '__main__':
p = argparse.ArgumentParser('Backup benchmark', epilog='''
ENV format
(LABEL:PATH|LABEL|PATH)[,max-workers=N][,use-copy-range=(on|off)][,mirror]
LABEL short name for the binary
PATH path to the binary
max-workers set x-perf.max-workers of backup job
use-copy-range set x-perf.use-copy-range of backup job
mirror use mirror job instead of backup''',
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('--env', nargs='+', help='''\
Qemu binaries with labels and options, see below
"ENV format" section''',
action=ExtendAction)
p.add_argument('--dir', nargs='+', help='''\
Directories, each containing "test-source" and/or
"test-target" files, raw images to used in
benchmarking. File path with label, like
label:/path/to/directory''',
action=ExtendAction)
p.add_argument('--nbd', help='''\
host:port for remote NBD image, (or just host, for
default port 10809). Use it in tests; its label is "nbd"
(but you cannot create test nbd:nbd).''')
p.add_argument('--test', nargs='+', help='''\
Tests, in form source-dir-label:target-dir-label''',
action=ExtendAction)
bench(p.parse_args())
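For reference, a hypothetical invocation (labels and paths are invented) might look like ./bench-backup.py --env new:/path/to/qemu-system-x86_64,max-workers=16 old:/path/to/old-qemu-system-x86_64 --dir ssd:/ssd/bench --test ssd:ssd, where the directory is expected to already contain 'test-source' and 'test-target' raw images. The script prints the result table and also writes it to results.json.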

View File

@ -25,7 +25,7 @@ from bench_block_job import bench_block_copy, drv_file, drv_nbd
def bench_func(env, case): def bench_func(env, case):
""" Handle one "cell" of benchmarking table. """ """ Handle one "cell" of benchmarking table. """
return bench_block_copy(env['qemu_binary'], env['cmd'], return bench_block_copy(env['qemu_binary'], env['cmd'], {},
case['source'], case['target']) case['source'], case['target'])

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# #
# Benchmark block jobs # Benchmark block jobs
# #
@ -78,16 +78,19 @@ def bench_block_job(cmd, cmd_args, qemu_args):
# Bench backup or mirror # Bench backup or mirror
def bench_block_copy(qemu_binary, cmd, source, target): def bench_block_copy(qemu_binary, cmd, cmd_options, source, target):
"""Helper to run bench_block_job() for mirror or backup""" """Helper to run bench_block_job() for mirror or backup"""
assert cmd in ('blockdev-backup', 'blockdev-mirror') assert cmd in ('blockdev-backup', 'blockdev-mirror')
source['node-name'] = 'source' source['node-name'] = 'source'
target['node-name'] = 'target' target['node-name'] = 'target'
return bench_block_job(cmd, cmd_options['job-id'] = 'job0'
{'job-id': 'job0', 'device': 'source', cmd_options['device'] = 'source'
'target': 'target', 'sync': 'full'}, cmd_options['target'] = 'target'
cmd_options['sync'] = 'full'
return bench_block_job(cmd, cmd_options,
[qemu_binary, [qemu_binary,
'-blockdev', json.dumps(source), '-blockdev', json.dumps(source),
'-blockdev', json.dumps(target)]) '-blockdev', json.dumps(target)])
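A usage sketch of the extended helper (the binary path and image paths are assumptions): per-command QMP members such as the new x-perf options are now passed through cmd_options:

    from bench_block_job import bench_block_copy, drv_file

    # Hypothetical raw images; cmd_options carries extra members of the
    # blockdev-backup command, here the new x-perf tuning knobs.
    result = bench_block_copy('/usr/bin/qemu-system-x86_64', 'blockdev-backup',
                              {'x-perf': {'max-workers': 16}},
                              drv_file('/tmp/test-source'),
                              drv_file('/tmp/test-target'))
    print(result)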

View File

@ -246,7 +246,9 @@ class TestParallelOps(iotests.QMPTestCase):
node_name = 'node%d' % i node_name = 'node%d' % i
job_id = 'stream-%s' % node_name job_id = 'stream-%s' % node_name
pending_jobs.append(job_id) pending_jobs.append(job_id)
result = self.vm.qmp('block-stream', device=node_name, job_id=job_id, base=self.imgs[i-2], speed=1024) result = self.vm.qmp('block-stream', device=node_name,
job_id=job_id, bottom=f'node{i-1}',
speed=1024)
self.assert_qmp(result, 'return', {}) self.assert_qmp(result, 'return', {})
for job in pending_jobs: for job in pending_jobs:
@ -277,12 +279,14 @@ class TestParallelOps(iotests.QMPTestCase):
self.assert_no_active_block_jobs() self.assert_no_active_block_jobs()
# Set a speed limit to make sure that this job blocks the rest # Set a speed limit to make sure that this job blocks the rest
result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4', base=self.imgs[1], speed=1024*1024) result = self.vm.qmp('block-stream', device='node4',
job_id='stream-node4', base=self.imgs[1],
filter_node_name='stream-filter', speed=1024*1024)
self.assert_qmp(result, 'return', {}) self.assert_qmp(result, 'return', {})
result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2]) result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
self.assert_qmp(result, 'error/desc', self.assert_qmp(result, 'error/desc',
"Node 'node4' is busy: block device is in use by block job: stream") "Node 'stream-filter' is busy: block device is in use by block job: stream")
result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2]) result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
self.assert_qmp(result, 'error/desc', self.assert_qmp(result, 'error/desc',
@ -295,7 +299,7 @@ class TestParallelOps(iotests.QMPTestCase):
# block-commit should also fail if it touches nodes used by the stream job # block-commit should also fail if it touches nodes used by the stream job
result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4') result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
self.assert_qmp(result, 'error/desc', self.assert_qmp(result, 'error/desc',
"Node 'node4' is busy: block device is in use by block job: stream") "Node 'stream-filter' is busy: block device is in use by block job: stream")
result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1') result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
self.assert_qmp(result, 'error/desc', self.assert_qmp(result, 'error/desc',

View File

@ -308,8 +308,13 @@ class BackupTest(iotests.QMPTestCase):
event = self.vm.event_wait(name="BLOCK_JOB_ERROR", event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
match={'data': {'device': 'drive0'}}) match={'data': {'device': 'drive0'}})
self.assertNotEqual(event, None) self.assertNotEqual(event, None)
# OK, job should be wedged # OK, job should pause, but it can't do it immediately, as it can't
res = self.vm.qmp('query-block-jobs') # cancel other parallel requests (which didn't fail)
with iotests.Timeout(60, "Timeout waiting for backup actually paused"):
while True:
res = self.vm.qmp('query-block-jobs')
if res['return'][0]['status'] == 'paused':
break
self.assert_qmp(res, 'return[0]/status', 'paused') self.assert_qmp(res, 'return[0]/status', 'paused')
res = self.vm.qmp('block-job-dismiss', id='drive0') res = self.vm.qmp('block-job-dismiss', id='drive0')
self.assert_qmp(res, 'error/desc', self.assert_qmp(res, 'error/desc',

View File

@ -42,6 +42,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
@ -91,6 +93,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
@ -140,6 +144,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
@ -189,6 +195,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
@ -238,6 +246,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
@ -287,6 +297,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
@ -335,6 +347,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
@ -383,6 +397,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
@ -431,6 +447,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
@ -479,6 +497,8 @@ read 512/512 bytes at offset 0
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
@ -507,6 +527,8 @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
@ -528,6 +550,8 @@ Images are identical.
{"execute":"quit"} {"execute":"quit"}
{"return": {}} {"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}

View File

@ -1,8 +1,7 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
# group: rw # group: rw
# #
# Test case for the QMP 'change' command and all other associated # Test case for media change monitor commands
# commands
# #
# Copyright (C) 2015 Red Hat, Inc. # Copyright (C) 2015 Red Hat, Inc.
# #
@ -74,23 +73,6 @@ class ChangeBaseClass(iotests.QMPTestCase):
class GeneralChangeTestsBaseClass(ChangeBaseClass): class GeneralChangeTestsBaseClass(ChangeBaseClass):
def test_change(self):
# 'change' requires a drive name, so skip the test for blockdev
if not self.use_drive:
return
result = self.vm.qmp('change', device='drive0', target=new_img,
arg=iotests.imgfmt)
self.assert_qmp(result, 'return', {})
self.wait_for_open()
self.wait_for_close()
result = self.vm.qmp('query-block')
if self.has_real_tray:
self.assert_qmp(result, 'return[0]/tray_open', False)
self.assert_qmp(result, 'return[0]/inserted/image/filename', new_img)
def test_blockdev_change_medium(self): def test_blockdev_change_medium(self):
result = self.vm.qmp('blockdev-change-medium', result = self.vm.qmp('blockdev-change-medium',
id=self.device_name, filename=new_img, id=self.device_name, filename=new_img,

View File

@ -1,5 +1,5 @@
....................................................................................................................................................................... ...........................................................................................................................................................
---------------------------------------------------------------------- ----------------------------------------------------------------------
Ran 167 tests Ran 155 tests
OK OK

View File

@ -23,6 +23,7 @@
import os import os
import iotests import iotests
from iotests import try_remove
def io_write_patterns(img, patterns): def io_write_patterns(img, patterns):
@ -30,13 +31,6 @@ def io_write_patterns(img, patterns):
iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img) iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
def try_remove(img):
try:
os.remove(img)
except OSError:
pass
def transaction_action(action, **kwargs): def transaction_action(action, **kwargs):
return { return {
'type': action, 'type': action,

View File

@ -21,68 +21,87 @@
import os import os
import iotests import iotests
import time
class TestStopWithBlockJob(iotests.QMPTestCase): class TestStopWithBlockJob(iotests.QMPTestCase):
test_img = os.path.join(iotests.test_dir, 'test.img') test_img = os.path.join(iotests.test_dir, 'test.img')
target_img = os.path.join(iotests.test_dir, 'target.img') target_img = os.path.join(iotests.test_dir, 'target.img')
base_img = os.path.join(iotests.test_dir, 'base.img') base_img = os.path.join(iotests.test_dir, 'base.img')
overlay_img = os.path.join(iotests.test_dir, 'overlay.img')
def setUp(self): def setUp(self):
iotests.qemu_img('create', '-f', iotests.imgfmt, self.base_img, "1G") iotests.qemu_img('create', '-f', iotests.imgfmt, self.base_img, "1G")
iotests.qemu_img('create', '-f', iotests.imgfmt, self.test_img, iotests.qemu_img('create', '-f', iotests.imgfmt, self.test_img,
"-b", self.base_img, '-F', iotests.imgfmt) "-b", self.base_img, '-F', iotests.imgfmt)
iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write -P0x5d 1M 128M', self.test_img) iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write -P0x5d 1M 128M',
self.vm = iotests.VM().add_drive(self.test_img) self.test_img)
self.vm = iotests.VM()
self.vm.add_object('throttle-group,id=tg0,x-bps-total=1024')
source_drive = 'driver=throttle,' \
'node-name=source,' \
'throttle-group=tg0,' \
f'file.driver={iotests.imgfmt},' \
f'file.file.filename={self.test_img}'
self.vm.add_drive(None, source_drive)
self.vm.launch() self.vm.launch()
def tearDown(self): def tearDown(self):
params = {"device": "drive0",
"bps": 0,
"bps_rd": 0,
"bps_wr": 0,
"iops": 0,
"iops_rd": 0,
"iops_wr": 0,
}
result = self.vm.qmp("block_set_io_throttle", conv_keys=False,
**params)
self.vm.shutdown() self.vm.shutdown()
for img in (self.test_img, self.target_img, self.base_img,
self.overlay_img):
iotests.try_remove(img)
def do_test_stop(self, cmd, **args): def do_test_stop(self, cmd, **args):
"""Test 'stop' while block job is running on a throttled drive. """Test 'stop' while block job is running on a throttled drive.
The 'stop' command shouldn't drain the job""" The 'stop' command shouldn't drain the job"""
params = {"device": "drive0",
"bps": 1024,
"bps_rd": 0,
"bps_wr": 0,
"iops": 0,
"iops_rd": 0,
"iops_wr": 0,
}
result = self.vm.qmp("block_set_io_throttle", conv_keys=False,
**params)
self.assert_qmp(result, 'return', {})
result = self.vm.qmp(cmd, **args) result = self.vm.qmp(cmd, **args)
self.assert_qmp(result, 'return', {}) self.assert_qmp(result, 'return', {})
result = self.vm.qmp("stop") result = self.vm.qmp("stop")
self.assert_qmp(result, 'return', {}) self.assert_qmp(result, 'return', {})
result = self.vm.qmp("query-block-jobs") result = self.vm.qmp("query-block-jobs")
self.assert_qmp(result, 'return[0]/busy', True)
self.assert_qmp(result, 'return[0]/status', 'running')
self.assert_qmp(result, 'return[0]/ready', False) self.assert_qmp(result, 'return[0]/ready', False)
def test_drive_mirror(self): def test_drive_mirror(self):
self.do_test_stop("drive-mirror", device="drive0", self.do_test_stop("drive-mirror", device="drive0",
target=self.target_img, target=self.target_img, format=iotests.imgfmt,
sync="full") sync="full", buf_size=65536)
def test_drive_backup(self): def test_drive_backup(self):
# Limit max-chunk and max-workers so that block-copy will not
# launch so many workers, each working on so much data, that
# stop's bdrv_drain_all() would finish the job
self.do_test_stop("drive-backup", device="drive0", self.do_test_stop("drive-backup", device="drive0",
target=self.target_img, target=self.target_img, format=iotests.imgfmt,
sync="full") sync="full",
x_perf={ 'max-chunk': 65536,
'max-workers': 8 })
def test_block_commit(self): def test_block_commit(self):
self.do_test_stop("block-commit", device="drive0") # Add overlay above the source node so that we actually use a
# commit job instead of a mirror job
iotests.qemu_img('create', '-f', iotests.imgfmt, self.overlay_img,
'1G')
result = self.vm.qmp('blockdev-add', **{
'node-name': 'overlay',
'driver': iotests.imgfmt,
'file': {
'driver': 'file',
'filename': self.overlay_img
}
})
self.assert_qmp(result, 'return', {})
result = self.vm.qmp('blockdev-snapshot',
node='source', overlay='overlay')
self.assert_qmp(result, 'return', {})
self.do_test_stop('block-commit', device='drive0', top_node='source')
if __name__ == '__main__': if __name__ == '__main__':
iotests.main(supported_fmts=["qcow2"], iotests.main(supported_fmts=["qcow2"],

View File

@ -165,7 +165,7 @@ wrote 1048576/1048576 bytes at offset 0
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{'execute': 'blockdev-del', {'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}} 'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}} {"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: stream"}}
{'execute': 'block-job-cancel', {'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}} 'arguments': {'device': 'job0'}}
{"return": {}} {"return": {}}

View File

@ -57,7 +57,7 @@ $QEMU_IMG measure --image-opts # missing filename
$QEMU_IMG measure -f qcow2 # missing filename $QEMU_IMG measure -f qcow2 # missing filename
$QEMU_IMG measure -l snap1 # missing filename $QEMU_IMG measure -l snap1 # missing filename
$QEMU_IMG measure -o , # invalid option list $QEMU_IMG measure -o , # invalid option list
$QEMU_IMG measure -l snapshot.foo # invalid snapshot option $QEMU_IMG measure -l snapshot.foo=bar # invalid snapshot option
$QEMU_IMG measure --output foo # invalid output format $QEMU_IMG measure --output foo # invalid output format
$QEMU_IMG measure --size -1 # invalid image size $QEMU_IMG measure --size -1 # invalid image size
$QEMU_IMG measure -O foo "$TEST_IMG" # unknown image file format $QEMU_IMG measure -O foo "$TEST_IMG" # unknown image file format


@@ -11,7 +11,7 @@ qemu-img: --image-opts, -f, and -l require a filename argument.
 qemu-img: --image-opts, -f, and -l require a filename argument.
 qemu-img: Invalid option list: ,
 qemu-img: Invalid parameter 'snapshot.foo'
-qemu-img: Failed in parsing snapshot param 'snapshot.foo'
+qemu-img: Failed in parsing snapshot param 'snapshot.foo=bar'
 qemu-img: --output must be used with human or json as argument.
 qemu-img: Invalid image size specified. Must be between 0 and 9223372036854775807.
 qemu-img: Unknown file format 'foo'


@@ -11,7 +11,7 @@ qemu-img: --image-opts, -f, and -l require a filename argument.
 qemu-img: --image-opts, -f, and -l require a filename argument.
 qemu-img: Invalid option list: ,
 qemu-img: Invalid parameter 'snapshot.foo'
-qemu-img: Failed in parsing snapshot param 'snapshot.foo'
+qemu-img: Failed in parsing snapshot param 'snapshot.foo=bar'
 qemu-img: --output must be used with human or json as argument.
 qemu-img: Invalid image size specified. Must be between 0 and 9223372036854775807.
 qemu-img: Unknown file format 'foo'


@@ -183,7 +183,8 @@ _send_qemu_cmd $h \
 'target': '$TEST_IMG.copy',
 'format': '$IMGFMT',
 'sync': 'full',
-'speed': 65536 } }" \
+'speed': 65536,
+'x-perf': {'max-chunk': 65536} } }" \
 "return"

 # If we don't sleep here 'quit' command races with disk I/O


@@ -88,7 +88,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off
 'target': 'TEST_DIR/t.IMGFMT.copy',
 'format': 'IMGFMT',
 'sync': 'full',
-'speed': 65536 } }
+'speed': 65536,
+'x-perf': { 'max-chunk': 65536 } } }
 Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}


@@ -204,13 +204,13 @@ with iotests.FilePath('disk.img') as disk_path, \
     # but related to this also automatic state transitions like job
     # completion), but still get pause points often enough to avoid making this
     # test very slow, it's important to have the right ratio between speed and
-    # buf_size.
+    # copy-chunk-size.
     #
-    # For backup, buf_size is hard-coded to the source image cluster size (64k),
-    # so we'll pick the same for mirror. The slice time, i.e. the granularity
-    # of the rate limiting is 100ms. With a speed of 256k per second, we can
-    # get four pause points per second. This gives us 250ms per iteration,
-    # which should be enough to stay deterministic.
+    # Chose 64k copy-chunk-size both for mirror (by buf_size) and backup (by
+    # x-max-chunk). The slice time, i.e. the granularity of the rate limiting
+    # is 100ms. With a speed of 256k per second, we can get four pause points
+    # per second. This gives us 250ms per iteration, which should be enough to
+    # stay deterministic.

     test_job_lifecycle(vm, 'drive-mirror', has_ready=True, job_args={
         'device': 'drive0-node',
@@ -227,6 +227,7 @@ with iotests.FilePath('disk.img') as disk_path, \
         'target': copy_path,
         'sync': 'full',
         'speed': 262144,
+        'x-perf': {'max-chunk': 65536},
         'auto-finalize': auto_finalize,
         'auto-dismiss': auto_dismiss,
     })
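
The rate-limiting arithmetic in the comment above can be sanity-checked with a
few lines of Python (a rough sketch; the constant names are illustrative and
not part of test 219 itself):

    # Back-of-the-envelope check of the pause-point reasoning above.
    SPEED = 256 * 1024   # job speed limit: 256k per second
    CHUNK = 64 * 1024    # copy chunk: mirror buf_size / backup x-perf max-chunk

    pause_points_per_second = SPEED / CHUNK   # 4.0
    ms_per_iteration = 1000 * CHUNK / SPEED   # 250.0 ms between pause points

    assert pause_points_per_second == 4.0
    assert ms_per_iteration == 250.0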


@@ -893,20 +893,24 @@ class TestBlockdevReopen(iotests.QMPTestCase):
         # hd1 <- hd0
         result = self.vm.qmp('block-stream', conv_keys = True, job_id = 'stream0',
-                             device = 'hd1', auto_finalize = False)
+                             device = 'hd1', filter_node_name='cor',
+                             auto_finalize = False)
         self.assert_qmp(result, 'return', {})

-        # We can't reopen with the original options because that would
-        # make hd1 read-only and block-stream requires it to be read-write
-        # (Which error message appears depends on whether the stream job is
-        # already done with copying at this point.)
+        # We can't reopen with the original options because there is a filter
+        # inserted by stream job above hd1.
         self.reopen(opts, {},
-                    ["Can't set node 'hd1' to r/o with copy-on-read enabled",
-                     "Cannot make block node read-only, there is a writer on it"])
+                    "Cannot change the option 'backing.backing.file.node-name'")
+
+        # We can't reopen hd1 to read-only, as block-stream requires it to be
+        # read-write
+        self.reopen(opts['backing'], {'read-only': True},
+                    "Cannot make block node read-only, there is a writer on it")

         # We can't remove hd2 while the stream job is ongoing
         opts['backing']['backing'] = None
-        self.reopen(opts, {'backing.read-only': False}, "Cannot change 'backing' link from 'hd1' to 'hd2'")
+        self.reopen(opts['backing'], {'read-only': False},
+                    "Cannot change 'backing' link from 'hd1' to 'hd2'")

         # We can detach hd1 from hd0 because it doesn't affect the stream job
         opts['backing'] = None


@@ -192,6 +192,7 @@ def blockdev_backup(vm, device, target, sync, **kwargs):
                          target=target,
                          sync=sync,
                          filter_node_name='backup-top',
+                         x_perf={'max-workers': 1},
                          **kwargs)
     return result

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/usr/bin/env python3
 # group: meta
 #
 # Copyright (C) 2020 Red Hat, Inc.
@@ -16,30 +16,98 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

-seq=$(basename $0)
-echo "QA output created by $seq"
-
-status=1    # failure is the default!
-
-# get standard environment
-. ./common.rc
-
-if ! type -p "pylint-3" > /dev/null; then
-    _notrun "pylint-3 not found"
-fi
-if ! type -p "mypy" > /dev/null; then
-    _notrun "mypy not found"
-fi
-
-pylint-3 --score=n iotests.py
-
-MYPYPATH=../../python/ mypy --warn-unused-configs --disallow-subclassing-any \
-    --disallow-any-generics --disallow-incomplete-defs \
-    --disallow-untyped-decorators --no-implicit-optional \
-    --warn-redundant-casts --warn-unused-ignores \
-    --no-implicit-reexport iotests.py
-
-# success, all done
-echo "*** done"
-rm -f $seq.full
-status=0
+import os
+import re
+import shutil
+import subprocess
+import sys
+
+import iotests
+
+
+# TODO: Empty this list!
+SKIP_FILES = (
+    '030', '040', '041', '044', '045', '055', '056', '057', '065', '093',
+    '096', '118', '124', '132', '136', '139', '147', '148', '149',
+    '151', '152', '155', '163', '165', '169', '194', '196', '199', '202',
+    '203', '205', '206', '207', '208', '210', '211', '212', '213', '216',
+    '218', '219', '222', '224', '228', '234', '235', '236', '237', '238',
+    '240', '242', '245', '246', '248', '255', '256', '257', '258', '260',
+    '262', '264', '266', '274', '277', '280', '281', '295', '296', '298',
+    '299', '302', '303', '304', '307',
+    'nbd-fault-injector.py', 'qcow2.py', 'qcow2_format.py', 'qed.py'
+)
+
+
+def is_python_file(filename):
+    if not os.path.isfile(filename):
+        return False
+
+    if filename.endswith('.py'):
+        return True
+
+    with open(filename) as f:
+        try:
+            first_line = f.readline()
+            return re.match('^#!.*python', first_line) is not None
+        except UnicodeDecodeError:  # Ignore binary files
+            return False
+
+
+def run_linters():
+    files = [filename for filename in (set(os.listdir('.')) - set(SKIP_FILES))
+             if is_python_file(filename)]
+
+    iotests.logger.debug('Files to be checked:')
+    iotests.logger.debug(', '.join(sorted(files)))
+
+    print('=== pylint ===')
+    sys.stdout.flush()
+
+    # Todo notes are fine, but fixme's or xxx's should probably just be
+    # fixed (in tests, at least)
+    env = os.environ.copy()
+    qemu_module_path = os.path.join(os.path.dirname(__file__),
+                                    '..', '..', 'python')
+    try:
+        env['PYTHONPATH'] += os.pathsep + qemu_module_path
+    except KeyError:
+        env['PYTHONPATH'] = qemu_module_path
+    subprocess.run(('pylint-3', '--score=n', '--notes=FIXME,XXX', *files),
+                   env=env, check=False)
+
+    print('=== mypy ===')
+    sys.stdout.flush()
+
+    # We have to call mypy separately for each file.  Otherwise, it
+    # will interpret all given files as belonging together (i.e., they
+    # may not both define the same classes, etc.; most notably, they
+    # must not both define the __main__ module).
+    env['MYPYPATH'] = env['PYTHONPATH']
+    for filename in files:
+        p = subprocess.run(('mypy',
+                            '--warn-unused-configs',
+                            '--disallow-subclassing-any',
+                            '--disallow-any-generics',
+                            '--disallow-incomplete-defs',
+                            '--disallow-untyped-decorators',
+                            '--no-implicit-optional',
+                            '--warn-redundant-casts',
+                            '--warn-unused-ignores',
+                            '--no-implicit-reexport',
+                            filename),
+                           env=env,
+                           check=False,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.STDOUT,
+                           universal_newlines=True)
+
+        if p.returncode != 0:
+            print(p.stdout)
+
+
+for linter in ('pylint-3', 'mypy'):
+    if shutil.which(linter) is None:
+        iotests.notrun(f'{linter} not found')
+
+iotests.script_main(run_linters)


@@ -1,3 +1,2 @@
-QA output created by 297
-Success: no issues found in 1 source file
-*** done
+=== pylint ===
+=== mypy ===


@@ -23,12 +23,15 @@ import os
 import random
 import re
 from typing import Dict, List, Optional, Union

 import iotests

+# Import qemu after iotests.py has amended sys.path
+# pylint: disable=wrong-import-order
 import qemu

 BlockBitmapMapping = List[Dict[str, Union[str, List[Dict[str, str]]]]]

+assert iotests.sock_dir is not None
 mig_sock = os.path.join(iotests.sock_dir, 'mig_sock')
@@ -112,10 +115,14 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
         If @msg is None, check that there has not been any error.
         """
         self.vm_b.shutdown()
+        log = self.vm_b.get_log()
+        assert log is not None  # Loaded after shutdown
         if msg is None:
-            self.assertNotIn('qemu-system-', self.vm_b.get_log())
+            self.assertNotIn('qemu-system-', log)
         else:
-            self.assertIn(msg, self.vm_b.get_log())
+            self.assertIn(msg, log)

     @staticmethod
     def mapping(node_name: str, node_alias: str,
@@ -447,9 +454,13 @@ class TestBlockBitmapMappingErrors(TestDirtyBitmapMigration):
         # Check for the error in the source's log
         self.vm_a.shutdown()
+        log = self.vm_a.get_log()
+        assert log is not None  # Loaded after shutdown
         self.assertIn(f"Cannot migrate bitmap '{name}' on node "
                       f"'{self.src_node_name}': Name is longer than 255 bytes",
-                      self.vm_a.get_log())
+                      log)

         # Expect abnormal shutdown of the destination VM because of
         # the failed migration

tests/qemu-iotests/310 (new executable file)

@@ -0,0 +1,117 @@
#!/usr/bin/env python3
# group: rw quick
#
# Copy-on-read tests using a COR filter with a bottom node
#
# Copyright (C) 2018 Red Hat, Inc.
# Copyright (c) 2020 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import iotests
from iotests import log, qemu_img, qemu_io_silent
# Need backing file support
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
log('')
log('=== Copy-on-read across nodes ===')
log('')
# This test is similar to the 216 one by Max Reitz <mreitz@redhat.com>
# The difference is that this test case involves a bottom node to the
# COR filter driver.
with iotests.FilePath('base.img') as base_img_path, \
iotests.FilePath('mid.img') as mid_img_path, \
iotests.FilePath('top.img') as top_img_path, \
iotests.VM() as vm:
log('--- Setting up images ---')
log('')
assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0
assert qemu_io_silent(base_img_path, '-c', 'write -P 1 0M 1M') == 0
assert qemu_io_silent(base_img_path, '-c', 'write -P 1 3M 1M') == 0
assert qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path,
'-F', iotests.imgfmt, mid_img_path) == 0
assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 2M 1M') == 0
assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 4M 1M') == 0
assert qemu_img('create', '-f', iotests.imgfmt, '-b', mid_img_path,
'-F', iotests.imgfmt, top_img_path) == 0
assert qemu_io_silent(top_img_path, '-c', 'write -P 2 1M 1M') == 0
#        0  1  2  3  4
# top       2
# mid          3     3
# base   1        1
log('Done')
log('')
log('--- Doing COR ---')
log('')
vm.launch()
log(vm.qmp('blockdev-add',
node_name='node0',
driver='copy-on-read',
bottom='node2',
file={
'driver': iotests.imgfmt,
'file': {
'driver': 'file',
'filename': top_img_path
},
'backing': {
'node-name': 'node2',
'driver': iotests.imgfmt,
'file': {
'driver': 'file',
'filename': mid_img_path
},
'backing': {
'driver': iotests.imgfmt,
'file': {
'driver': 'file',
'filename': base_img_path
}
},
}
}))
# Trigger COR
log(vm.qmp('human-monitor-command',
command_line='qemu-io node0 "read 0 5M"'))
vm.shutdown()
log('')
log('--- Checking COR result ---')
log('')
# Detach backing to check that we can read the data from the top level now
assert qemu_img('rebase', '-u', '-b', '', '-f', iotests.imgfmt,
top_img_path) == 0
assert qemu_io_silent(top_img_path, '-c', 'read -P 0 0 1M') == 0
assert qemu_io_silent(top_img_path, '-c', 'read -P 2 1M 1M') == 0
assert qemu_io_silent(top_img_path, '-c', 'read -P 3 2M 1M') == 0
assert qemu_io_silent(top_img_path, '-c', 'read -P 0 3M 1M') == 0
assert qemu_io_silent(top_img_path, '-c', 'read -P 3 4M 1M') == 0
log('Done')
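
The read-back asserts above capture the key property of a copy-on-read filter
with a 'bottom' node: data is copied up only from the bottom node and the
layers above it, never from below it. Summarized as a small sketch (offsets in
MiB; illustrative only, not part of the test):

    # Expected content of the top image after the COR read, once its backing
    # chain has been detached.
    expected_pattern = {
        0: 0,  # only allocated in base (below the bottom node) -> not copied
        1: 2,  # already allocated in top
        2: 3,  # allocated in mid (the bottom node) -> copied into top
        3: 0,  # only allocated in base -> not copied
        4: 3,  # allocated in mid -> copied into top
    }

    for offset_mib, pattern in expected_pattern.items():
        print(f'read -P {pattern} {offset_mib}M 1M')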


@@ -0,0 +1,15 @@
=== Copy-on-read across nodes ===
--- Setting up images ---
Done
--- Doing COR ---
{"return": {}}
{"return": ""}
--- Checking COR result ---
Done

tests/qemu-iotests/313 (new executable file)

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# group: rw auto quick
#
# Test for the regression fixed in commit c8bf9a9169
#
# Copyright (C) 2020 Igalia, S.L.
# Author: Alberto Garcia <berto@igalia.com>
# Based on a test case by Maxim Levitsky <mlevitsk@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# creator
owner=berto@igalia.com
seq=`basename $0`
echo "QA output created by $seq"
status=1 # failure is the default!
_cleanup()
{
_cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
_supported_fmt qcow2
_supported_proto file
_supported_os Linux
_unsupported_imgopts cluster_size refcount_bits extended_l2 compat=0.10 data_file
# The cluster size must be at least the granularity of the mirror job (4KB)
# Note that larger cluster sizes will produce very large images (several GBs)
cluster_size=4096
refcount_bits=64 # Make it equal to the L2 entry size for convenience
options="cluster_size=${cluster_size},refcount_bits=${refcount_bits}"
# Number of refcount entries per refcount blocks
ref_entries=$(( ${cluster_size} * 8 / ${refcount_bits} ))
# Number of data clusters needed to fill a refcount block
# Equals ${ref_entries} minus two (one L2 table and one refcount block)
data_clusters_per_refblock=$(( ${ref_entries} - 2 ))
# Number of entries in the refcount cache
ref_blocks=4
# Write enough data clusters to fill the refcount cache and allocate
# one more refcount block.
# Subtract 3 clusters from the total: qcow2 header, refcount table, L1 table
total_data_clusters=$(( ${data_clusters_per_refblock} * ${ref_blocks} + 1 - 3 ))
# Total size to write in bytes
total_size=$(( ${total_data_clusters} * ${cluster_size} ))
echo
echo '### Create the image'
echo
TEST_IMG_FILE=$TEST_IMG.base _make_test_img -o $options $total_size | _filter_img_create_size
echo
echo '### Write data to allocate more refcount blocks than the cache can hold'
echo
$QEMU_IO -c "write -P 1 0 $total_size" $TEST_IMG.base | _filter_qemu_io
echo
echo '### Create an overlay'
echo
_make_test_img -F $IMGFMT -b $TEST_IMG.base -o $options | _filter_img_create_size
echo
echo '### Fill the overlay with zeroes'
echo
$QEMU_IO -c "write -z 0 $total_size" $TEST_IMG | _filter_qemu_io
echo
echo '### Commit changes to the base image'
echo
$QEMU_IMG commit $TEST_IMG
echo
echo '### Check the base image'
echo
$QEMU_IMG check $TEST_IMG.base
# success, all done
echo "*** done"
rm -f $seq.full
status=0
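
As a cross-check of the size arithmetic above, the same figure can be derived
with a few lines of Python (a sketch, not part of the test; the reference
value comes from the expected output below):

    # Re-derive how many bytes test 313 writes so that more refcount blocks
    # are allocated than the 4-entry refcount cache can hold.
    cluster_size = 4096
    refcount_bits = 64
    ref_blocks = 4                                    # refcount cache entries

    ref_entries = cluster_size * 8 // refcount_bits   # 512 refcounts per refblock
    data_clusters_per_refblock = ref_entries - 2      # minus one L2 table, one refblock
    total_data_clusters = data_clusters_per_refblock * ref_blocks + 1 - 3
    total_size = total_data_clusters * cluster_size

    assert total_size == 8347648   # matches "wrote 8347648/8347648 bytes" below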


@@ -0,0 +1,29 @@
QA output created by 313
### Create the image
Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=SIZE
### Write data to allocate more refcount blocks than the cache can hold
wrote 8347648/8347648 bytes at offset 0
7.961 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
### Create an overlay
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT
### Fill the overlay with zeroes
wrote 8347648/8347648 bytes at offset 0
7.961 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
### Commit changes to the base image
Image committed.
### Check the base image
No errors were found on the image.
Image end offset: 8396800
*** done


@@ -821,9 +821,10 @@ _supported_cache_modes()
 # Check whether the filesystem supports O_DIRECT
 _check_o_direct()
 {
-    $QEMU_IMG create -f raw "$TEST_IMG".test_o_direct 1M > /dev/null
-    out=$($QEMU_IO -f raw -t none -c quit "$TEST_IMG".test_o_direct 2>&1)
-    rm -f "$TEST_IMG".test_o_direct
+    testfile="$TEST_DIR"/_check_o_direct
+    $QEMU_IMG create -f raw "$testfile" 1M > /dev/null
+    out=$($QEMU_IO -f raw -t none -c quit "$testfile" 2>&1)
+    rm -f "$testfile"

     [[ "$out" != *"O_DIRECT"* ]]
 }
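
The helper above probes O_DIRECT support by creating a scratch file in
TEST_DIR and opening it with cache=none, then checking whether the error
output mentions O_DIRECT. A rough Python equivalent of the same idea (an
illustration for Linux, not what common.rc actually runs):

    import errno
    import os
    import tempfile

    def filesystem_supports_o_direct(directory: str) -> bool:
        # tmpfs and similar filesystems reject O_DIRECT opens with EINVAL.
        fd, path = tempfile.mkstemp(dir=directory)
        os.close(fd)
        try:
            os.close(os.open(path, os.O_RDONLY | os.O_DIRECT))
            return True
        except OSError as err:
            return err.errno != errno.EINVAL
        finally:
            os.remove(path)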


@@ -318,4 +318,6 @@
 307 rw quick export
 308 rw
 309 rw auto quick
+310 rw quick
 312 rw quick
+313 rw auto quick


@@ -75,12 +75,20 @@ qemu_opts = os.environ.get('QEMU_OPTIONS', '').strip().split(' ')
 imgfmt = os.environ.get('IMGFMT', 'raw')
 imgproto = os.environ.get('IMGPROTO', 'file')
-test_dir = os.environ.get('TEST_DIR')
-sock_dir = os.environ.get('SOCK_DIR')
 output_dir = os.environ.get('OUTPUT_DIR', '.')
-cachemode = os.environ.get('CACHEMODE')
-aiomode = os.environ.get('AIOMODE')
-qemu_default_machine = os.environ.get('QEMU_DEFAULT_MACHINE')
+
+try:
+    test_dir = os.environ['TEST_DIR']
+    sock_dir = os.environ['SOCK_DIR']
+    cachemode = os.environ['CACHEMODE']
+    aiomode = os.environ['AIOMODE']
+    qemu_default_machine = os.environ['QEMU_DEFAULT_MACHINE']
+except KeyError:
+    # We are using these variables as proxies to indicate that we're
+    # not being run via "check". There may be other things set up by
+    # "check" that individual test cases rely on.
+    sys.stderr.write('Please run this test via the "check" script\n')
+    sys.exit(os.EX_USAGE)

 socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper')
@@ -507,12 +515,15 @@ class FilePath:
         return False


+def try_remove(img):
+    try:
+        os.remove(img)
+    except OSError:
+        pass
+
+
 def file_path_remover():
     for path in reversed(file_path_remover.paths):
-        try:
-            os.remove(path)
-        except OSError:
-            pass
+        try_remove(path)


 def file_path(*names, base_dir=test_dir):
@@ -1286,14 +1297,6 @@ def execute_setup_common(supported_fmts: Sequence[str] = (),
     """
     # Note: Python 3.6 and pylint do not like 'Collection' so use 'Sequence'.

-    # We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to
-    # indicate that we're not being run via "check". There may be
-    # other things set up by "check" that individual test cases rely
-    # on.
-    if test_dir is None or qemu_default_machine is None:
-        sys.stderr.write('Please run this test via the "check" script\n')
-        sys.exit(os.EX_USAGE)
-
     debug = '-d' in sys.argv
     if debug:
         sys.argv.remove('-d')


@@ -157,6 +157,7 @@ Coroutine *qemu_coroutine_new(void)
     sigset_t sigs;
     sigset_t osigs;
     sigjmp_buf old_env;
+    static pthread_mutex_t sigusr2_mutex = PTHREAD_MUTEX_INITIALIZER;

     /* The way to manipulate stack is with the sigaltstack function. We
      * prepare a stack, with it delivering a signal to ourselves and then
@@ -186,6 +187,12 @@ Coroutine *qemu_coroutine_new(void)
     sa.sa_handler = coroutine_trampoline;
     sigfillset(&sa.sa_mask);
     sa.sa_flags = SA_ONSTACK;
+
+    /*
+     * sigaction() is a process-global operation.  We must not run
+     * this code in multiple threads at once.
+     */
+    pthread_mutex_lock(&sigusr2_mutex);
     if (sigaction(SIGUSR2, &sa, &osa) != 0) {
         abort();
     }
@@ -234,6 +241,8 @@ Coroutine *qemu_coroutine_new(void)
      * Restore the old SIGUSR2 signal handler and mask
      */
     sigaction(SIGUSR2, &osa, NULL);
+    pthread_mutex_unlock(&sigusr2_mutex);
+
     pthread_sigmask(SIG_SETMASK, &osigs, NULL);

     /*