diff --git a/aio-posix.c b/aio-posix.c
index 06148a9ba3..482b316502 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -210,6 +210,7 @@ void aio_set_fd_handler(AioContext *ctx,
 {
     AioHandler *node;
     bool is_new = false;
+    bool deleted = false;
 
     node = find_aio_handler(ctx, fd);
 
@@ -228,7 +229,7 @@ void aio_set_fd_handler(AioContext *ctx,
                  * releasing the walking_handlers lock.
                  */
                 QLIST_REMOVE(node, node);
-                g_free(node);
+                deleted = true;
             }
         }
     } else {
@@ -253,6 +254,9 @@ void aio_set_fd_handler(AioContext *ctx,
 
     aio_epoll_update(ctx, node, is_new);
     aio_notify(ctx);
+    if (deleted) {
+        g_free(node);
+    }
 }
 
 void aio_set_event_notifier(AioContext *ctx,
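The aio-posix.c hunks above defer `g_free(node)` until after `aio_epoll_update()` and `aio_notify()`, which still receive `node` as an argument; freeing it at the point of removal was a use-after-free. Below is a minimal standalone sketch of the same defer-the-free pattern, using hypothetical types and helpers rather than QEMU's (illustration only, not part of the patch; error checking omitted):

```c
/* Sketch: unregister a handler but keep the struct alive until the last
 * helper that dereferences it has run; only then free it. */
#include <stdbool.h>
#include <stdlib.h>

typedef struct Handler { int fd; bool in_use; } Handler;

static void notify_backend(Handler *h) { (void)h->fd; /* still reads h */ }

static void set_handler(Handler *h, bool removing)
{
    bool deleted = false;

    if (removing) {
        h->in_use = false;  /* unregister, but do not free yet */
        deleted = true;
    }

    notify_backend(h);      /* last place h may be dereferenced */

    if (deleted) {
        free(h);            /* safe: nothing touches h after this point */
    }
}

int main(void)
{
    Handler *h = malloc(sizeof(*h));
    h->fd = 0;
    h->in_use = true;
    set_handler(h, true);
    return 0;
}
```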
diff --git a/blockdev.c b/blockdev.c
index fc85128e94..917ae0687f 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -300,6 +300,45 @@ static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
     }
 }
 
+static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
+                                  Error **errp)
+{
+    const QListEntry *entry;
+    for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
+        switch (qobject_type(entry->value)) {
+
+        case QTYPE_QSTRING: {
+            unsigned long long length;
+            const char *str = qstring_get_str(qobject_to_qstring(entry->value));
+            if (parse_uint_full(str, &length, 10) == 0 &&
+                length > 0 && length <= UINT_MAX) {
+                block_acct_add_interval(stats, (unsigned) length);
+            } else {
+                error_setg(errp, "Invalid interval length: %s", str);
+                return false;
+            }
+            break;
+        }
+
+        case QTYPE_QINT: {
+            int64_t length = qint_get_int(qobject_to_qint(entry->value));
+            if (length > 0 && length <= UINT_MAX) {
+                block_acct_add_interval(stats, (unsigned) length);
+            } else {
+                error_setg(errp, "Invalid interval length: %" PRId64, length);
+                return false;
+            }
+            break;
+        }
+
+        default:
+            error_setg(errp, "The specification of stats-intervals is invalid");
+            return false;
+        }
+    }
+    return true;
+}
+
 static bool check_throttle_config(ThrottleConfig *cfg, Error **errp)
 {
     if (throttle_conflicting(cfg)) {
@@ -442,13 +481,14 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
     int bdrv_flags = 0;
     int on_read_error, on_write_error;
     bool account_invalid, account_failed;
-    const char *stats_intervals;
     BlockBackend *blk;
     BlockDriverState *bs;
     ThrottleConfig cfg;
     int snapshot = 0;
     Error *error = NULL;
     QemuOpts *opts;
+    QDict *interval_dict = NULL;
+    QList *interval_list = NULL;
     const char *id;
     bool has_driver_specific_opts;
     BlockdevDetectZeroesOptions detect_zeroes =
@@ -482,7 +522,14 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
     account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true);
     account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true);
 
-    stats_intervals = qemu_opt_get(opts, "stats-intervals");
+    qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
+    qdict_array_split(interval_dict, &interval_list);
+
+    if (qdict_size(interval_dict) != 0) {
+        error_setg(errp, "Invalid option stats-intervals.%s",
+                   qdict_first(interval_dict)->key);
+        goto early_err;
+    }
 
     extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
                                     &detect_zeroes, &error);
@@ -583,33 +630,10 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
 
         block_acct_init(blk_get_stats(blk), account_invalid, account_failed);
 
-        if (stats_intervals) {
-            char **intervals = g_strsplit(stats_intervals, ":", 0);
-            unsigned i;
-
-            if (*stats_intervals == '\0') {
-                error_setg(&error, "stats-intervals can't have an empty value");
-            }
-
-            for (i = 0; !error && intervals[i] != NULL; i++) {
-                unsigned long long val;
-                if (parse_uint_full(intervals[i], &val, 10) == 0 &&
-                    val > 0 && val <= UINT_MAX) {
-                    block_acct_add_interval(blk_get_stats(blk), val);
-                } else {
-                    error_setg(&error, "Invalid interval length: '%s'",
-                               intervals[i]);
-                }
-            }
-
-            g_strfreev(intervals);
-
-            if (error) {
-                error_propagate(errp, error);
-                blk_unref(blk);
-                blk = NULL;
-                goto err_no_bs_opts;
-            }
+        if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
+            blk_unref(blk);
+            blk = NULL;
+            goto err_no_bs_opts;
         }
     }
 
@@ -617,10 +641,14 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
 
 err_no_bs_opts:
     qemu_opts_del(opts);
+    QDECREF(interval_dict);
+    QDECREF(interval_list);
     return blk;
 
 early_err:
     qemu_opts_del(opts);
+    QDECREF(interval_dict);
+    QDECREF(interval_list);
 err_no_opts:
     QDECREF(bs_opts);
     return NULL;
@@ -3948,11 +3976,6 @@ QemuOptsList qemu_common_drive_opts = {
             .type = QEMU_OPT_BOOL,
            .help = "whether to account for failed I/O operations "
                    "in the statistics",
-        },{
-            .name = "stats-intervals",
-            .type = QEMU_OPT_STRING,
-            .help = "colon-separated list of intervals "
-                    "for collecting I/O statistics, in seconds",
         },
         { /* end of list */ }
     },
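With the blockdev.c change, `stats-intervals` stops being a single colon-separated string and becomes a list: `qdict_extract_subqdict()` collects the `stats-intervals.*` sub-options from the drive's option dict and `qdict_array_split()` turns them into a QList (so dotted options such as `stats-intervals.0=60,stats-intervals.1=3600` presumably feed this path). Each entry may be a QInt or a decimal QString, and must be between 1 and UINT_MAX seconds. The following is only a sketch of calling the new helper with a hand-built list; it would have to live inside blockdev.c (the helper is static), and `example_set_intervals` is a made-up name:

```c
/* Sketch (not part of the patch): exercising parse_stats_intervals() with a
 * hand-built list -- the same shape qdict_array_split() produces. */
static void example_set_intervals(BlockBackend *blk)
{
    QList *intervals = qlist_new();
    Error *local_err = NULL;

    qlist_append(intervals, qint_from_int(60));        /* QTYPE_QINT entry    */
    qlist_append(intervals, qstring_from_str("3600")); /* QTYPE_QSTRING entry */

    if (!parse_stats_intervals(blk_get_stats(blk), intervals, &local_err)) {
        /* entries that are not positive integers <= UINT_MAX fail the call */
        error_report_err(local_err);
    }
    QDECREF(intervals);
}
```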
diff --git a/disas/arm.c b/disas/arm.c
index 6165246539..7a7354b76a 100644
--- a/disas/arm.c
+++ b/disas/arm.c
@@ -1779,7 +1779,7 @@ print_insn_coprocessor (bfd_vma pc, struct disassemble_info *info, long given,
 
                      /* Is ``imm'' a negative number? */
                      if (imm & 0x40)
-                        imm |= (-1 << 7);
+                        imm |= (~0u << 7);
 
                      func (stream, "%d", imm);
                    }
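The disas/arm.c change replaces `(-1 << 7)` with `(~0u << 7)`: left-shifting a negative signed value is undefined behaviour in C, while shifting the unsigned all-ones value is well defined and yields the same 0xffffff80 mask used to sign-extend the 7-bit immediate. A small standalone illustration with a made-up value, assuming the usual 32-bit two's-complement int (not disassembler code):

```c
/* Sketch: sign-extending a 7-bit immediate the way the fixed expression does.
 * 0x55 has bit 6 set, i.e. it is -43 as a 7-bit two's-complement value. */
#include <stdio.h>

int main(void)
{
    int imm = 0x55;

    if (imm & 0x40) {          /* is the 7-bit immediate negative? */
        imm |= (~0u << 7);     /* ORs in 0xffffff80; (-1 << 7) would be UB */
    }
    printf("%d\n", imm);       /* prints -43 */
    return 0;
}
```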
diff --git a/docs/bitmaps.md b/docs/bitmaps.md
index 9fd8ea65ea..a2e8d51163 100644
--- a/docs/bitmaps.md
+++ b/docs/bitmaps.md
@@ -19,12 +19,20 @@ which is included at the end of this document.
 * A dirty bitmap's name is unique to the node, but bitmaps attached to
   different nodes can share the same name.
 
+* Dirty bitmaps created for internal use by QEMU may be anonymous and have no
+  name, but any user-created bitmaps may not be. There can be any number of
+  anonymous bitmaps per node.
+
+* The name of a user-created bitmap must not be empty ("").
+
 ## Bitmap Modes
 
 * A Bitmap can be "frozen," which means that it is currently in-use by a backup
   operation and cannot be deleted, renamed, written to, reset,
   etc.
 
+* The normal operating mode for a bitmap is "active."
+
 ## Basic QMP Usage
 
 ### Supported Commands ###
@@ -319,6 +327,155 @@ full backup as a backing image.
             "event": "BLOCK_JOB_COMPLETED"
         }
     ```
+
+### Partial Transactional Failures
+
+* Sometimes, a transaction will succeed in launching and return success,
+  but then later the backup jobs themselves may fail. It is possible that
+  a management application may have to deal with a partial backup failure
+  after a successful transaction.
+
+* If multiple backup jobs are specified in a single transaction, when one of
+  them fails, it will not interact with the other backup jobs in any way.
+
+* The job(s) that succeeded will clear the dirty bitmap associated with the
+  operation, but the job(s) that failed will not. It is not "safe" to delete
+  any incremental backups that were created successfully in this scenario,
+  even though others failed.
+
+#### Example
+
+* QMP example highlighting two backup jobs:
+
+    ```json
+    { "execute": "transaction",
+      "arguments": {
+        "actions": [
+          { "type": "drive-backup",
+            "data": { "device": "drive0", "bitmap": "bitmap0",
+                      "format": "qcow2", "mode": "existing",
+                      "sync": "incremental", "target": "d0-incr-1.qcow2" } },
+          { "type": "drive-backup",
+            "data": { "device": "drive1", "bitmap": "bitmap1",
+                      "format": "qcow2", "mode": "existing",
+                      "sync": "incremental", "target": "d1-incr-1.qcow2" } }
+        ]
+      }
+    }
+    ```
+
+* QMP example response, highlighting one success and one failure:
+    * Acknowledgement that the Transaction was accepted and jobs were launched:
+        ```json
+        { "return": {} }
+        ```
+
+    * Later, QEMU sends notice that the first job was completed:
+        ```json
+        { "timestamp": { "seconds": 1447192343, "microseconds": 615698 },
+          "data": { "device": "drive0", "type": "backup",
+                    "speed": 0, "len": 67108864, "offset": 67108864 },
+          "event": "BLOCK_JOB_COMPLETED"
+        }
+        ```
+
+    * Later yet, QEMU sends notice that the second job has failed:
+        ```json
+        { "timestamp": { "seconds": 1447192399, "microseconds": 683015 },
+          "data": { "device": "drive1", "action": "report",
+                    "operation": "read" },
+          "event": "BLOCK_JOB_ERROR" }
+        ```
+
+        ```json
+        { "timestamp": { "seconds": 1447192399, "microseconds": 685853 },
+          "data": { "speed": 0, "offset": 0, "len": 67108864,
+                    "error": "Input/output error",
+                    "device": "drive1", "type": "backup" },
+          "event": "BLOCK_JOB_COMPLETED" }
+        ```
+
+* In the above example, "d0-incr-1.qcow2" is valid and must be kept,
+  but "d1-incr-1.qcow2" is invalid and should be deleted. If a VM-wide
+  incremental backup of all drives at a point-in-time is to be made,
+  new backups for both drives will need to be made, taking into account
+  that a new incremental backup for drive0 needs to be based on top of
+  "d0-incr-1.qcow2."
+
+### Grouped Completion Mode
+
+* While jobs launched by transactions normally complete or fail on their own,
+  it is possible to instruct them to complete or fail together as a group.
+
+* QMP transactions take an optional properties structure that can affect
+  the semantics of the transaction.
+
+* The "completion-mode" transaction property can be either "individual"
+  which is the default, legacy behavior described above, or "grouped,"
+  a new behavior detailed below.
+
+* Delayed Completion: In grouped completion mode, no jobs will report
+  success until all jobs are ready to report success.
+
+* Grouped failure: If any job fails in grouped completion mode, all remaining
+  jobs will be cancelled. Any incremental backups will restore their dirty
+  bitmap objects as if no backup command was ever issued.
+
+    * Regardless of whether QEMU reports a particular incremental backup job
+      as CANCELLED or as an ERROR, the in-memory bitmap will be restored.
+
+#### Example
+
+* Here's the same example scenario from above with the new property:
+
+    ```json
+    { "execute": "transaction",
+      "arguments": {
+        "actions": [
+          { "type": "drive-backup",
+            "data": { "device": "drive0", "bitmap": "bitmap0",
+                      "format": "qcow2", "mode": "existing",
+                      "sync": "incremental", "target": "d0-incr-1.qcow2" } },
+          { "type": "drive-backup",
+            "data": { "device": "drive1", "bitmap": "bitmap1",
+                      "format": "qcow2", "mode": "existing",
+                      "sync": "incremental", "target": "d1-incr-1.qcow2" } }
+        ],
+        "properties": {
+          "completion-mode": "grouped"
+        }
+      }
+    }
+    ```
+
+* QMP example response, highlighting a failure for drive1:
+    * Acknowledgement that the Transaction was accepted and jobs were launched:
+        ```json
+        { "return": {} }
+        ```
+
+    * Later, QEMU sends notice that the second job has errored out,
+      but that the first job was also cancelled:
+        ```json
+        { "timestamp": { "seconds": 1447193702, "microseconds": 632377 },
+          "data": { "device": "drive1", "action": "report",
+                    "operation": "read" },
+          "event": "BLOCK_JOB_ERROR" }
+        ```
+
+        ```json
+        { "timestamp": { "seconds": 1447193702, "microseconds": 640074 },
+          "data": { "speed": 0, "offset": 0, "len": 67108864,
+                    "error": "Input/output error",
+                    "device": "drive1", "type": "backup" },
+          "event": "BLOCK_JOB_COMPLETED" }
+        ```
+
+        ```json
+        { "timestamp": { "seconds": 1447193702, "microseconds": 640163 },
+          "data": { "device": "drive0", "type": "backup", "speed": 0,
+                    "len": 67108864, "offset": 16777216 },
+          "event": "BLOCK_JOB_CANCELLED" }
+        ```