Block patches:
- Several qcow2 fixes and refactorings
- Let qemu-img convert try to stay at cluster boundaries
- Stable child names for quorum (with x-blockdev-change)
- Explicitly drop vhdx 4k sector support, as it was never actually working
- rbd: Mark @namespace a strong runtime option
- iotests.py improvements
- Drop unused runtime_opts objects
- Skip a test case in 030 when run through make check-block

-----BEGIN PGP SIGNATURE-----

iQFFBAABCAAwFiEEkb62CjDbPohX0Rgp9AfbAGHVz0AFAl9glvkSHG1yZWl0ekBy
ZWRoYXQuY29tAAoJEPQH2wBh1c9A/0cH+MR1uFlqNuL4Q8vnZPyEEB6FniWgIF/K
oMivXdZe9F5UjUPx+2I8iOpzBnT+lMGQZzaNSOZtI9Rv6JqBTjA9xlVWyVo5SVzF
cNfVE00lCnBRsYGiycOkOyTnP0PuxlFJDRwdozgumch3akN/0Ep9npL7So2BMwVf
hrPH4VVAihf0ZZaEH9JN2Sgm8/ffpXFcGtg8uoS1NqgK42fGWp4sip6mPFulNDBE
4HFLv98/hKqBlU5+sYe9mKo7SJSbqEXMxR7AHmDxM9qBbzFF2SPkZoSEaZlqw+bz
YlR/EDVVbAAAOfKeG8DPjfNwZHVdusMmOpzKTG9QhEOxyK2Vbef1lQ==
=7AVw
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-09-15' into staging

Block patches:
- Several qcow2 fixes and refactorings
- Let qemu-img convert try to stay at cluster boundaries
- Stable child names for quorum (with x-blockdev-change)
- Explicitly drop vhdx 4k sector support, as it was never actually working
- rbd: Mark @namespace a strong runtime option
- iotests.py improvements
- Drop unused runtime_opts objects
- Skip a test case in 030 when run through make check-block

# gpg: Signature made Tue 15 Sep 2020 11:27:05 BST
# gpg: using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg: issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1 1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-09-15: (22 commits)
  block/rbd: add 'namespace' to qemu_rbd_strong_runtime_opts[]
  qcow2: Convert qcow2_alloc_cluster_offset() into qcow2_alloc_host_offset()
  qcow2: Make preallocate_co() resize the image to the correct size
  block/qcow: remove runtime opts
  block/rbd: remove runtime_opts
  qcow2: Return the original error code in qcow2_co_pwrite_zeroes()
  qcow2: Make qcow2_free_any_clusters() free only one cluster
  qcow2: Handle QCowL2Meta on error in preallocate_co()
  block/vhdx: Support vhdx image only with 512 bytes logical sector size
  iotests: Skip test_stream_parallel in test 030 when doing "make check"
  qemu-img: Explicit number replaced by a constant
  qcow2: Rewrite the documentation of qcow2_alloc_cluster_offset()
  qcow2: Don't check nb_clusters when removing l2meta from the list
  qcow2: Fix removal of list members from BDRVQcow2State.cluster_allocs
  qcow2: Use macros for the L1, refcount and bitmap table entry sizes
  qemu-img: avoid unaligned read requests during convert
  block/quorum.c: stable children names
  qemu-iotests: Simplify FilePath __init__
  qemu-iotests: Merge FilePaths and FilePath
  qemu-iotests: Support varargs syntax in FilePaths
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 9b14671aec
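One recurring theme of this series is alignment arithmetic. As a rough, standalone illustration of the idea behind "qemu-img: avoid unaligned read requests during convert" (this is not the actual qemu-img code; the helper name and values below are made up): when a read would not end on a cluster boundary, trimming its tail keeps the next request aligned.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: trim a request so it ends on a cluster boundary
 * whenever that still leaves something to read. The real logic in
 * qemu-img convert is more involved. */
static int64_t trim_to_cluster(int64_t offset, int64_t length, int64_t cluster_size)
{
    int64_t end = offset + length;
    int64_t aligned_end = end / cluster_size * cluster_size; /* round down */

    if (aligned_end > offset) {
        return aligned_end - offset; /* shortened, cluster-aligned length */
    }
    return length;                   /* too small to split, keep as is */
}

int main(void)
{
    /* 64 KiB clusters, a 200 KiB read starting at 128 KiB -> 196608 bytes */
    printf("%lld\n", (long long)trim_to_cluster(131072, 204800, 65536));
    return 0;
}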
block/qcow.c

@@ -105,15 +105,6 @@ static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
         return 0;
 }
 
-static QemuOptsList qcow_runtime_opts = {
-    .name = "qcow",
-    .head = QTAILQ_HEAD_INITIALIZER(qcow_runtime_opts.head),
-    .desc = {
-        BLOCK_CRYPTO_OPT_DEF_QCOW_KEY_SECRET("encrypt."),
-        { /* end of list */ }
-    },
-};
-
 static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
 {
block/qcow2-bitmap.c

@@ -42,6 +42,9 @@
 #define BME_MIN_GRANULARITY_BITS 9
 #define BME_MAX_NAME_SIZE 1023
 
+/* Size of bitmap table entries */
+#define BME_TABLE_ENTRY_SIZE (sizeof(uint64_t))
+
 QEMU_BUILD_BUG_ON(BME_MAX_NAME_SIZE != BDRV_BITMAP_MAX_NAME_SIZE);
 
 #if BME_MAX_TABLE_SIZE * 8ULL > INT_MAX
@@ -232,7 +235,7 @@ static int bitmap_table_load(BlockDriverState *bs, Qcow2BitmapTable *tb,
 
     assert(tb->size <= BME_MAX_TABLE_SIZE);
     ret = bdrv_pread(bs->file, tb->offset,
-                     table, tb->size * sizeof(uint64_t));
+                     table, tb->size * BME_TABLE_ENTRY_SIZE);
     if (ret < 0) {
         goto fail;
     }
@@ -265,7 +268,7 @@ static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
     }
 
     clear_bitmap_table(bs, bitmap_table, tb->size);
-    qcow2_free_clusters(bs, tb->offset, tb->size * sizeof(uint64_t),
+    qcow2_free_clusters(bs, tb->offset, tb->size * BME_TABLE_ENTRY_SIZE,
                         QCOW2_DISCARD_OTHER);
     g_free(bitmap_table);
 
@@ -690,7 +693,7 @@ int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
         ret = qcow2_inc_refcounts_imrt(bs, res,
                                        refcount_table, refcount_table_size,
                                        bm->table.offset,
-                                       bm->table.size * sizeof(uint64_t));
+                                       bm->table.size * BME_TABLE_ENTRY_SIZE);
         if (ret < 0) {
             goto out;
         }
@@ -1797,7 +1800,7 @@ uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *in_bs,
         /* Assume the entire bitmap is allocated */
         bitmaps_size += bmclusters * cluster_size;
         /* Also reserve space for the bitmap table entries */
-        bitmaps_size += ROUND_UP(bmclusters * sizeof(uint64_t),
+        bitmaps_size += ROUND_UP(bmclusters * BME_TABLE_ENTRY_SIZE,
                                  cluster_size);
         /* And space for contribution to bitmap directory size */
         bitmap_dir_size += calc_dir_entry_size(strlen(name), 0);
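A note on what the new macro expresses (a sketch with made-up values, not QEMU code): a qcow2 bitmap table is an array of 64-bit entries, so every byte count derived from it is "number of entries * entry size", which is exactly what the hunks above switch to spelling with BME_TABLE_ENTRY_SIZE.

#include <stdint.h>
#include <stdio.h>

#define BME_TABLE_ENTRY_SIZE (sizeof(uint64_t))   /* one table entry, as in the patch */

int main(void)
{
    uint64_t table_entries = 16;        /* hypothetical tb->size */
    uint64_t cluster_size = 65536;      /* hypothetical cluster size */

    /* Bytes read by bitmap_table_load() / freed by free_bitmap_clusters() */
    uint64_t table_bytes = table_entries * BME_TABLE_ENTRY_SIZE;

    /* Clusters the table occupies (rounded up), as reserved in
     * qcow2_get_persistent_dirty_bitmap_size() */
    uint64_t table_clusters = (table_bytes + cluster_size - 1) / cluster_size;

    printf("%llu bytes, %llu cluster(s)\n",
           (unsigned long long)table_bytes, (unsigned long long)table_clusters);
    return 0;
}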
block/qcow2-cluster.c

@@ -47,8 +47,8 @@ int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
 
     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
     ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
-                                       new_l1_size * sizeof(uint64_t),
-                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
+                                       new_l1_size * L1E_SIZE,
+                             (s->l1_size - new_l1_size) * L1E_SIZE, 0);
     if (ret < 0) {
         goto fail;
     }
@@ -76,7 +76,7 @@ fail:
      * l1_table in memory to avoid possible image corruption.
      */
     memset(s->l1_table + new_l1_size, 0,
-           (s->l1_size - new_l1_size) * sizeof(uint64_t));
+           (s->l1_size - new_l1_size) * L1E_SIZE);
     return ret;
 }
 
@@ -96,7 +96,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
     /* Do a sanity check on min_size before trying to calculate new_l1_size
      * (this prevents overflows during the while loop for the calculation of
      * new_l1_size) */
-    if (min_size > INT_MAX / sizeof(uint64_t)) {
+    if (min_size > INT_MAX / L1E_SIZE) {
         return -EFBIG;
     }
 
@@ -114,7 +114,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
     }
 
     QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
-    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
+    if (new_l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
         return -EFBIG;
     }
 
@@ -123,7 +123,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
             s->l1_size, new_l1_size);
 #endif
 
-    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
+    new_l1_size2 = L1E_SIZE * new_l1_size;
     new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
     if (new_l1_table == NULL) {
         return -ENOMEM;
@@ -131,7 +131,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
     memset(new_l1_table, 0, new_l1_size2);
 
     if (s->l1_size) {
-        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
+        memcpy(new_l1_table, s->l1_table, s->l1_size * L1E_SIZE);
     }
 
     /* write new table (align to cluster) */
@@ -180,7 +180,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
     s->l1_table = new_l1_table;
     old_l1_size = s->l1_size;
     s->l1_size = new_l1_size;
-    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
+    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * L1E_SIZE,
                         QCOW2_DISCARD_OTHER);
     return 0;
 fail:
@@ -225,9 +225,9 @@ int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
     BDRVQcow2State *s = bs->opaque;
     int l1_start_index;
     int i, ret;
-    int bufsize = MAX(sizeof(uint64_t),
+    int bufsize = MAX(L1E_SIZE,
                       MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
-    int nentries = bufsize / sizeof(uint64_t);
+    int nentries = bufsize / L1E_SIZE;
     g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);
 
     if (buf == NULL) {
@@ -1096,7 +1096,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
      */
     if (!m->keep_old_clusters && j != 0) {
         for (i = 0; i < j; i++) {
-            qcow2_free_any_clusters(bs, old_cluster[i], 1, QCOW2_DISCARD_NEVER);
+            qcow2_free_any_cluster(bs, old_cluster[i], QCOW2_DISCARD_NEVER);
         }
     }
 
@@ -1710,34 +1710,39 @@ static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
 
 out:
     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
-    if (ret < 0 && *m && (*m)->nb_clusters > 0) {
-        QLIST_REMOVE(*m, next_in_flight);
-    }
     return ret;
 }
 
 /*
- * alloc_cluster_offset
+ * For a given area on the virtual disk defined by @offset and @bytes,
+ * find the corresponding area on the qcow2 image, allocating new
+ * clusters (or subclusters) if necessary. The result can span a
+ * combination of allocated and previously unallocated clusters.
  *
- * For a given offset on the virtual disk, find the cluster offset in qcow2
- * file. If the offset is not found, allocate a new cluster.
+ * Note that offset may not be cluster aligned. In this case, the returned
+ * *host_offset points to exact byte referenced by offset and therefore
+ * isn't cluster aligned as well.
  *
- * If the cluster was already allocated, m->nb_clusters is set to 0 and
- * other fields in m are meaningless.
+ * On return, @host_offset is set to the beginning of the requested
+ * area. This area is guaranteed to be contiguous on the qcow2 file
+ * but it can be smaller than initially requested. In this case @bytes
+ * is updated with the actual size.
  *
- * If the cluster is newly allocated, m->nb_clusters is set to the number of
- * contiguous clusters that have been allocated. In this case, the other
- * fields of m are valid and contain information about the first allocated
- * cluster.
+ * If any clusters or subclusters were allocated then @m contains a
+ * list with the information of all the affected regions. Note that
+ * this can happen regardless of whether this function succeeds or
+ * not. The caller is responsible for updating the L2 metadata of the
+ * allocated clusters (on success) or freeing them (on failure), and
+ * for clearing the contents of @m afterwards in both cases.
  *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
-int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
+int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *host_offset,
                             QCowL2Meta **m)
 {
     BDRVQcow2State *s = bs->opaque;
     uint64_t start, remaining;
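The rewritten comment above spells out the new contract of qcow2_alloc_host_offset(). The sketch below shows how a caller is expected to use it; it is modeled on the qcow2_co_pwritev_part() and preallocate_co() hunks later in this diff, uses QEMU-internal types, is not compilable on its own, and elides locking and most error handling.

/* Sketch only: mirrors the write path shown further down in this diff. */
while (bytes) {
    unsigned int cur_bytes = MIN(bytes, INT_MAX);
    uint64_t host_offset;
    QCowL2Meta *l2meta = NULL;

    /* May shrink cur_bytes; host_offset keeps offset's sub-cluster shift. */
    ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
                                  &host_offset, &l2meta);
    if (ret < 0) {
        /* l2meta may still describe allocations that must be cleaned up. */
        goto out;
    }

    /* ... write cur_bytes of guest data at host_offset ... */

    /* The caller updates the L2 metadata (or frees the clusters on
     * failure) and clears l2meta, e.g. via qcow2_handle_l2meta(). */
    ret = qcow2_handle_l2meta(bs, &l2meta, true);
    if (ret < 0) {
        goto out;
    }

    bytes -= cur_bytes;
    offset += cur_bytes;
}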
@@ -1758,7 +1763,7 @@ again:
     while (true) {
 
         if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
-            *host_offset = start_of_cluster(s, cluster_offset);
+            *host_offset = cluster_offset;
         }
 
         assert(remaining >= cur_bytes);
@@ -1841,6 +1846,8 @@ again:
     *bytes -= remaining;
     assert(*bytes > 0);
     assert(*host_offset != INV_OFFSET);
+    assert(offset_into_cluster(s, *host_offset) ==
+           offset_into_cluster(s, offset));
 
     return 0;
 }
@@ -1912,7 +1919,7 @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
             set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
         }
         /* Then decrease the refcount */
-        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
+        qcow2_free_any_cluster(bs, old_l2_entry, type);
     }
 
     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
@@ -2004,7 +2011,7 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
 
         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
         if (unmap) {
-            qcow2_free_any_clusters(bs, old_l2_entry, 1, QCOW2_DISCARD_REQUEST);
+            qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
         }
         set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
         if (has_subclusters(s)) {
@@ -2410,7 +2417,7 @@ int qcow2_expand_zero_clusters(BlockDriverState *bs,
         Error *local_err = NULL;
 
         ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
-                                   s->snapshots[i].l1_size, sizeof(uint64_t),
+                                   s->snapshots[i].l1_size, L1E_SIZE,
                                    QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                    &local_err);
         if (ret < 0) {
@@ -2418,7 +2425,7 @@ int qcow2_expand_zero_clusters(BlockDriverState *bs,
             goto fail;
         }
 
-        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
+        l1_size2 = s->snapshots[i].l1_size * L1E_SIZE;
         new_l1_table = g_try_realloc(l1_table, l1_size2);
 
         if (!new_l1_table) {
block/qcow2-refcount.c

@@ -105,8 +105,8 @@ int qcow2_refcount_init(BlockDriverState *bs)
     s->get_refcount = get_refcount_funcs[s->refcount_order];
     s->set_refcount = set_refcount_funcs[s->refcount_order];
 
-    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
-    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
+    assert(s->refcount_table_size <= INT_MAX / REFTABLE_ENTRY_SIZE);
+    refcount_table_size2 = s->refcount_table_size * REFTABLE_ENTRY_SIZE;
     s->refcount_table = g_try_malloc(refcount_table_size2);
 
     if (s->refcount_table_size > 0) {
@@ -434,8 +434,8 @@ static int alloc_refcount_block(BlockDriverState *bs,
     if (refcount_table_index < s->refcount_table_size) {
         uint64_t data64 = cpu_to_be64(new_block);
         BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
-        ret = bdrv_pwrite_sync(bs->file,
-            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
+        ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset +
+                               refcount_table_index * REFTABLE_ENTRY_SIZE,
                                &data64, sizeof(data64));
         if (ret < 0) {
             goto fail;
@@ -562,8 +562,8 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
                      DIV_ROUND_UP(total_refblock_count, 2);
     }
     /* The qcow2 file can only store the reftable size in number of clusters */
-    table_size = ROUND_UP(table_size, s->cluster_size / sizeof(uint64_t));
-    table_clusters = (table_size * sizeof(uint64_t)) / s->cluster_size;
+    table_size = ROUND_UP(table_size, s->cluster_size / REFTABLE_ENTRY_SIZE);
+    table_clusters = (table_size * REFTABLE_ENTRY_SIZE) / s->cluster_size;
 
     if (table_size > QCOW_MAX_REFTABLE_SIZE) {
         return -EFBIG;
@@ -581,13 +581,13 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
     if (table_size > s->max_refcount_table_index) {
         /* We're actually growing the reftable */
         memcpy(new_table, s->refcount_table,
-               (s->max_refcount_table_index + 1) * sizeof(uint64_t));
+               (s->max_refcount_table_index + 1) * REFTABLE_ENTRY_SIZE);
     } else {
         /* Improbable case: We're shrinking the reftable. However, the caller
          * has assured us that there is only empty space beyond @start_offset,
          * so we can simply drop all of the refblocks that won't fit into the
          * new reftable. */
-        memcpy(new_table, s->refcount_table, table_size * sizeof(uint64_t));
+        memcpy(new_table, s->refcount_table, table_size * REFTABLE_ENTRY_SIZE);
     }
 
     if (new_refblock_offset) {
@@ -682,7 +682,7 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
 
     BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
     ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
-                           table_size * sizeof(uint64_t));
+                           table_size * REFTABLE_ENTRY_SIZE);
     if (ret < 0) {
         goto fail;
     }
@@ -717,7 +717,8 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
     update_max_refcount_table_index(s);
 
     /* Free old table. */
-    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
+    qcow2_free_clusters(bs, old_table_offset,
+                        old_table_size * REFTABLE_ENTRY_SIZE,
                         QCOW2_DISCARD_OTHER);
 
     return end_offset;
@@ -1156,8 +1157,8 @@ void qcow2_free_clusters(BlockDriverState *bs,
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
-void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
-                             int nb_clusters, enum qcow2_discard_type type)
+void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
+                            enum qcow2_discard_type type)
 {
     BDRVQcow2State *s = bs->opaque;
     QCow2ClusterType ctype = qcow2_get_cluster_type(bs, l2_entry);
@@ -1168,7 +1169,7 @@ void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
              ctype == QCOW2_CLUSTER_ZERO_ALLOC))
         {
             bdrv_pdiscard(s->data_file, l2_entry & L2E_OFFSET_MASK,
-                          nb_clusters << s->cluster_bits);
+                          s->cluster_size);
         }
         return;
     }
@@ -1191,7 +1192,7 @@ void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                                      l2_entry & L2E_OFFSET_MASK);
         } else {
             qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
-                                nb_clusters << s->cluster_bits, type);
+                                s->cluster_size, type);
         }
         break;
     case QCOW2_CLUSTER_ZERO_PLAIN:
@@ -1253,7 +1254,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
 
     l2_slice = NULL;
     l1_table = NULL;
-    l1_size2 = l1_size * sizeof(uint64_t);
+    l1_size2 = l1_size * L1E_SIZE;
     slice_size2 = s->l2_slice_size * l2_entry_size(s);
     n_slices = s->cluster_size / slice_size2;
 
@@ -1784,7 +1785,7 @@ static int check_refcounts_l1(BlockDriverState *bs,
     uint64_t *l1_table = NULL, l2_offset, l1_size2;
     int i, ret;
 
-    l1_size2 = l1_size * sizeof(uint64_t);
+    l1_size2 = l1_size * L1E_SIZE;
 
     /* Mark L1 table as used */
     ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
@@ -2146,7 +2147,7 @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
             res->corruptions++;
             continue;
         }
-        if (sn->l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
+        if (sn->l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
            fprintf(stderr, "ERROR snapshot %s (%s) l1_size=%#" PRIx32 ": "
                    "L1 table is too large; snapshot table entry corrupted\n",
                    sn->id_str, sn->name, sn->l1_size);
@@ -2169,7 +2170,8 @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
     /* refcount data */
     ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
                                    s->refcount_table_offset,
-                                   s->refcount_table_size * sizeof(uint64_t));
+                                   s->refcount_table_size *
+                                   REFTABLE_ENTRY_SIZE);
     if (ret < 0) {
         return ret;
     }
@@ -2390,11 +2392,11 @@ write_refblocks:
             uint32_t old_reftable_size = reftable_size;
             uint64_t *new_on_disk_reftable;
 
-            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
-                                     s->cluster_size) / sizeof(uint64_t);
+            reftable_size = ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE,
+                                     s->cluster_size) / REFTABLE_ENTRY_SIZE;
             new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                  reftable_size *
-                                                 sizeof(uint64_t));
+                                                 REFTABLE_ENTRY_SIZE);
             if (!new_on_disk_reftable) {
                 res->check_errors++;
                 ret = -ENOMEM;
@@ -2403,7 +2405,7 @@ write_refblocks:
             on_disk_reftable = new_on_disk_reftable;
 
             memset(on_disk_reftable + old_reftable_size, 0,
-                   (reftable_size - old_reftable_size) * sizeof(uint64_t));
+                   (reftable_size - old_reftable_size) * REFTABLE_ENTRY_SIZE);
 
             /* The offset we have for the reftable is now no longer valid;
              * this will leak that range, but we can easily fix that by running
@@ -2420,7 +2422,7 @@ write_refblocks:
         reftable_offset < 0)
     {
         uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
-                                                         sizeof(uint64_t));
+                                                         REFTABLE_ENTRY_SIZE);
         reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                               refcount_table, nb_clusters,
                                               &first_free_cluster);
@@ -2460,8 +2462,8 @@ write_refblocks:
         uint64_t post_refblock_start, reftable_clusters;
 
         post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
-        reftable_clusters = size_to_clusters(s,
-                                             reftable_size * sizeof(uint64_t));
+        reftable_clusters =
+            size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE);
         /* Not pretty but simple */
         if (first_free_cluster < post_refblock_start) {
             first_free_cluster = post_refblock_start;
@@ -2485,16 +2487,16 @@ write_refblocks:
     }
 
     ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
-                                        reftable_size * sizeof(uint64_t),
+                                        reftable_size * REFTABLE_ENTRY_SIZE,
                                         false);
     if (ret < 0) {
         fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
         goto fail;
     }
 
-    assert(reftable_size < INT_MAX / sizeof(uint64_t));
+    assert(reftable_size < INT_MAX / REFTABLE_ENTRY_SIZE);
     ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
-                      reftable_size * sizeof(uint64_t));
+                      reftable_size * REFTABLE_ENTRY_SIZE);
     if (ret < 0) {
         fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
         goto fail;
@@ -2503,7 +2505,7 @@ write_refblocks:
     /* Enter new reftable into the image header */
     reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
     reftable_offset_and_clusters.reftable_clusters =
-        cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t)));
+        cpu_to_be32(size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE));
     ret = bdrv_pwrite_sync(bs->file,
                            offsetof(QCowHeader, refcount_table_offset),
                            &reftable_offset_and_clusters,
@@ -2693,14 +2695,14 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
     offset = start_of_cluster(s, offset);
 
     if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
-        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
+        if (overlaps_with(s->l1_table_offset, s->l1_size * L1E_SIZE)) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }
 
    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
-                         s->refcount_table_size * sizeof(uint64_t))) {
+                         s->refcount_table_size * REFTABLE_ENTRY_SIZE)) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }
@@ -2715,7 +2717,7 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
-                             s->snapshots[i].l1_size * sizeof(uint64_t))) {
+                             s->snapshots[i].l1_size * L1E_SIZE)) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
@@ -2749,11 +2751,11 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz = s->snapshots[i].l1_size;
-           uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
+           uint64_t l1_sz2 = l1_sz * L1E_SIZE;
            uint64_t *l1;
            int ret;
 
-           ret = qcow2_validate_table(bs, l1_ofs, l1_sz, sizeof(uint64_t),
+           ret = qcow2_validate_table(bs, l1_ofs, l1_sz, L1E_SIZE,
                                       QCOW_MAX_L1_SIZE, "", NULL);
            if (ret < 0) {
                return ret;
@@ -2877,8 +2879,8 @@ static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
         uint64_t new_reftable_size;
 
         new_reftable_size = ROUND_UP(reftable_index + 1,
-                                     s->cluster_size / sizeof(uint64_t));
-        if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
+                                     s->cluster_size / REFTABLE_ENTRY_SIZE);
+        if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / REFTABLE_ENTRY_SIZE) {
             error_setg(errp,
                        "This operation would make the refcount table grow "
                        "beyond the maximum size supported by QEMU, aborting");
@@ -2886,14 +2888,14 @@ static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
         }
 
         new_reftable = g_try_realloc(*reftable, new_reftable_size *
-                                                sizeof(uint64_t));
+                                                REFTABLE_ENTRY_SIZE);
         if (!new_reftable) {
             error_setg(errp, "Failed to increase reftable buffer size");
             return -ENOMEM;
         }
 
         memset(new_reftable + *reftable_size, 0,
-               (new_reftable_size - *reftable_size) * sizeof(uint64_t));
+               (new_reftable_size - *reftable_size) * REFTABLE_ENTRY_SIZE);
 
         *reftable = new_reftable;
         *reftable_size = new_reftable_size;
@@ -3164,13 +3166,14 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
 
         if (new_allocation) {
             if (new_reftable_offset) {
-                qcow2_free_clusters(bs, new_reftable_offset,
-                                    allocated_reftable_size * sizeof(uint64_t),
-                                    QCOW2_DISCARD_NEVER);
+                qcow2_free_clusters(
+                    bs, new_reftable_offset,
+                    allocated_reftable_size * REFTABLE_ENTRY_SIZE,
+                    QCOW2_DISCARD_NEVER);
             }
 
             new_reftable_offset = qcow2_alloc_clusters(bs, new_reftable_size *
-                                                           sizeof(uint64_t));
+                                                           REFTABLE_ENTRY_SIZE);
             if (new_reftable_offset < 0) {
                 error_setg_errno(errp, -new_reftable_offset,
                                  "Failed to allocate the new reftable");
@@ -3196,7 +3199,7 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
 
     /* Write the new reftable */
     ret = qcow2_pre_write_overlap_check(bs, 0, new_reftable_offset,
-                                        new_reftable_size * sizeof(uint64_t),
+                                        new_reftable_size * REFTABLE_ENTRY_SIZE,
                                         false);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Overlap check failed");
@@ -3208,7 +3211,7 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
     }
 
     ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable,
-                      new_reftable_size * sizeof(uint64_t));
+                      new_reftable_size * REFTABLE_ENTRY_SIZE);
 
     for (i = 0; i < new_reftable_size; i++) {
         be64_to_cpus(&new_reftable[i]);
@@ -3285,7 +3288,7 @@ done:
 
         if (new_reftable_offset > 0) {
             qcow2_free_clusters(bs, new_reftable_offset,
-                                new_reftable_size * sizeof(uint64_t),
+                                new_reftable_size * REFTABLE_ENTRY_SIZE,
                                 QCOW2_DISCARD_OTHER);
         }
     }
@@ -3374,7 +3377,7 @@ int qcow2_shrink_reftable(BlockDriverState *bs)
 {
     BDRVQcow2State *s = bs->opaque;
     uint64_t *reftable_tmp =
-        g_malloc(s->refcount_table_size * sizeof(uint64_t));
+        g_malloc(s->refcount_table_size * REFTABLE_ENTRY_SIZE);
     int i, ret;
 
     for (i = 0; i < s->refcount_table_size; i++) {
@@ -3412,7 +3415,7 @@ int qcow2_shrink_reftable(BlockDriverState *bs)
     }
 
     ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset, reftable_tmp,
-                           s->refcount_table_size * sizeof(uint64_t));
+                           s->refcount_table_size * REFTABLE_ENTRY_SIZE);
     /*
      * If the write in the reftable failed the image may contain a partially
     * overwritten reftable. In this case it would be better to clear the
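The reftable sizing arithmetic that keeps recurring above, restated as a tiny standalone program (illustration only; QEMU's own ROUND_UP lives in its headers, and the numbers here are made up):

#include <stdint.h>
#include <stdio.h>

#define REFTABLE_ENTRY_SIZE (sizeof(uint64_t))
/* Same shape as QEMU's ROUND_UP(); redefined here to stay self-contained. */
#define ROUND_UP(n, d) ((((n) + (d) - 1) / (d)) * (d))

int main(void)
{
    uint64_t cluster_size = 65536;   /* hypothetical */
    uint64_t table_size = 9000;      /* hypothetical number of reftable entries */

    /* The reftable size is stored on disk in clusters, so round the entry
     * count up to a whole number of clusters' worth of entries ... */
    uint64_t entries_per_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
    table_size = ROUND_UP(table_size, entries_per_cluster);

    /* ... and the byte size then divides evenly into clusters. */
    uint64_t table_clusters = (table_size * REFTABLE_ENTRY_SIZE) / cluster_size;

    printf("entries=%llu clusters=%llu\n",
           (unsigned long long)table_size, (unsigned long long)table_clusters);
    return 0;
}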
block/qcow2-snapshot.c

@@ -659,7 +659,7 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
     sn->extra_data_size = sizeof(QCowSnapshotExtraData);
 
     /* Allocate the L1 table of the snapshot and copy the current one there. */
-    l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
+    l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * L1E_SIZE);
     if (l1_table_offset < 0) {
         ret = l1_table_offset;
         goto fail;
@@ -679,13 +679,13 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
     }
 
     ret = qcow2_pre_write_overlap_check(bs, 0, sn->l1_table_offset,
-                                        s->l1_size * sizeof(uint64_t), false);
+                                        s->l1_size * L1E_SIZE, false);
     if (ret < 0) {
         goto fail;
     }
 
     ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table,
-                      s->l1_size * sizeof(uint64_t));
+                      s->l1_size * L1E_SIZE);
     if (ret < 0) {
         goto fail;
     }
@@ -768,7 +768,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
     sn = &s->snapshots[snapshot_index];
 
     ret = qcow2_validate_table(bs, sn->l1_table_offset, sn->l1_size,
-                               sizeof(uint64_t), QCOW_MAX_L1_SIZE,
+                               L1E_SIZE, QCOW_MAX_L1_SIZE,
                                "Snapshot L1 table", &local_err);
     if (ret < 0) {
         error_report_err(local_err);
@@ -803,8 +803,8 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
         goto fail;
     }
 
-    cur_l1_bytes = s->l1_size * sizeof(uint64_t);
-    sn_l1_bytes = sn->l1_size * sizeof(uint64_t);
+    cur_l1_bytes = s->l1_size * L1E_SIZE;
+    sn_l1_bytes = sn->l1_size * L1E_SIZE;
 
     /*
      * Copy the snapshot L1 table to the current L1 table.
@@ -917,7 +917,7 @@ int qcow2_snapshot_delete(BlockDriverState *bs,
     sn = s->snapshots[snapshot_index];
 
     ret = qcow2_validate_table(bs, sn.l1_table_offset, sn.l1_size,
-                               sizeof(uint64_t), QCOW_MAX_L1_SIZE,
+                               L1E_SIZE, QCOW_MAX_L1_SIZE,
                                "Snapshot L1 table", errp);
     if (ret < 0) {
         return ret;
@@ -953,7 +953,7 @@ int qcow2_snapshot_delete(BlockDriverState *bs,
         error_setg_errno(errp, -ret, "Failed to free the cluster and L1 table");
         return ret;
     }
-    qcow2_free_clusters(bs, sn.l1_table_offset, sn.l1_size * sizeof(uint64_t),
+    qcow2_free_clusters(bs, sn.l1_table_offset, sn.l1_size * L1E_SIZE,
                         QCOW2_DISCARD_SNAPSHOT);
 
     /* must update the copied flag on the current cluster offsets */
@@ -1030,12 +1030,12 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
 
     /* Allocate and read in the snapshot's L1 table */
     ret = qcow2_validate_table(bs, sn->l1_table_offset, sn->l1_size,
-                               sizeof(uint64_t), QCOW_MAX_L1_SIZE,
+                               L1E_SIZE, QCOW_MAX_L1_SIZE,
                                "Snapshot L1 table", errp);
     if (ret < 0) {
         return ret;
     }
-    new_l1_bytes = sn->l1_size * sizeof(uint64_t);
+    new_l1_bytes = sn->l1_size * L1E_SIZE;
     new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_bytes);
     if (new_l1_table == NULL) {
         return -ENOMEM;
block/qcow2.c (108 changed lines)
@@ -1543,7 +1543,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
 
     /* read the level 1 table */
     ret = qcow2_validate_table(bs, header.l1_table_offset,
-                               header.l1_size, sizeof(uint64_t),
+                               header.l1_size, L1E_SIZE,
                                QCOW_MAX_L1_SIZE, "Active L1 table", errp);
     if (ret < 0) {
         goto fail;
@@ -1568,15 +1568,14 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
     }
 
     if (s->l1_size > 0) {
-        s->l1_table = qemu_try_blockalign(bs->file->bs,
-                                          s->l1_size * sizeof(uint64_t));
+        s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
         if (s->l1_table == NULL) {
             error_setg(errp, "Could not allocate L1 table");
             ret = -ENOMEM;
             goto fail;
         }
         ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
-                         s->l1_size * sizeof(uint64_t));
+                         s->l1_size * L1E_SIZE);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Could not read L1 table");
             goto fail;
@@ -2102,7 +2101,6 @@ static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
         QCowL2Meta *next;
 
         if (link_l2) {
-            assert(!l2meta->prealloc);
             ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
             if (ret) {
                 goto out;
@@ -2112,9 +2110,7 @@ static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
         }
 
         /* Take the request off the list of running requests */
-        if (l2meta->nb_clusters != 0) {
-            QLIST_REMOVE(l2meta, next_in_flight);
-        }
+        QLIST_REMOVE(l2meta, next_in_flight);
 
         qemu_co_queue_restart_all(&l2meta->dependent_requests);
 
@@ -2563,7 +2559,7 @@ static coroutine_fn int qcow2_co_pwritev_part(
     int offset_in_cluster;
     int ret;
     unsigned int cur_bytes; /* number of sectors in current iteration */
-    uint64_t cluster_offset;
+    uint64_t host_offset;
     QCowL2Meta *l2meta = NULL;
     AioTaskPool *aio = NULL;
 
@@ -2584,16 +2580,13 @@ static coroutine_fn int qcow2_co_pwritev_part(
 
         qemu_co_mutex_lock(&s->lock);
 
-        ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
-                                         &cluster_offset, &l2meta);
+        ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
+                                      &host_offset, &l2meta);
         if (ret < 0) {
             goto out_locked;
         }
 
-        assert(offset_into_cluster(s, cluster_offset) == 0);
-
-        ret = qcow2_pre_write_overlap_check(bs, 0,
-                                            cluster_offset + offset_in_cluster,
+        ret = qcow2_pre_write_overlap_check(bs, 0, host_offset,
                                             cur_bytes, true);
         if (ret < 0) {
             goto out_locked;
@@ -2605,7 +2598,7 @@ static coroutine_fn int qcow2_co_pwritev_part(
             aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
         }
         ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
-                             cluster_offset + offset_in_cluster, offset,
+                             host_offset, offset,
                              cur_bytes, qiov, qiov_offset, l2meta);
         l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
         if (ret < 0) {
@@ -3126,38 +3119,28 @@ static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
     int64_t file_length;
     unsigned int cur_bytes;
     int ret;
-    QCowL2Meta *meta;
+    QCowL2Meta *meta = NULL, *m;
 
     assert(offset <= new_length);
     bytes = new_length - offset;
 
     while (bytes) {
         cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size));
-        ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
+        ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
                                       &host_offset, &meta);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Allocating clusters failed");
-            return ret;
+            goto out;
         }
 
-        while (meta) {
-            QCowL2Meta *next = meta->next;
-            meta->prealloc = true;
-
-            ret = qcow2_alloc_cluster_link_l2(bs, meta);
-            if (ret < 0) {
-                error_setg_errno(errp, -ret, "Mapping clusters failed");
-                qcow2_free_any_clusters(bs, meta->alloc_offset,
-                                        meta->nb_clusters, QCOW2_DISCARD_NEVER);
-                return ret;
-            }
-
-            /* There are no dependent requests, but we need to remove our
-             * request from the list of in-flight requests */
-            QLIST_REMOVE(meta, next_in_flight);
-
-            g_free(meta);
-            meta = next;
+        for (m = meta; m != NULL; m = m->next) {
+            m->prealloc = true;
+        }
+
+        ret = qcow2_handle_l2meta(bs, &meta, true);
+        if (ret < 0) {
+            error_setg_errno(errp, -ret, "Mapping clusters failed");
+            goto out;
         }
 
         /* TODO Preallocate data if requested */
@@ -3174,7 +3157,8 @@ static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
     file_length = bdrv_getlength(s->data_file->bs);
     if (file_length < 0) {
         error_setg_errno(errp, -file_length, "Could not get file size");
-        return file_length;
+        ret = file_length;
+        goto out;
     }
 
     if (host_offset + cur_bytes > file_length) {
@@ -3184,11 +3168,15 @@ static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
         ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false,
                                mode, 0, errp);
         if (ret < 0) {
-            return ret;
+            goto out;
         }
     }
 
-    return 0;
+    ret = 0;
+
+out:
+    qcow2_handle_l2meta(bs, &meta, false);
+    return ret;
 }
 
 /* qcow2_refcount_metadata_size:
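The preallocate_co() rewrite above replaces ad-hoc cleanup on each error path with a single out: label whose cleanup is safe to run in both the success and the failure case. A generic, self-contained illustration of that idiom (not QEMU code; all names here are invented):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the role qcow2_handle_l2meta() plays at the out: label:
 * it consumes the pending resource and is harmless when nothing is held. */
static void cleanup(char **res)
{
    free(*res);
    *res = NULL;
}

static int do_work(int fail_early)
{
    char *res = NULL;
    int ret;

    res = malloc(64);
    if (!res) {
        ret = -1;
        goto out;          /* nothing held yet; cleanup is still safe */
    }

    if (fail_early) {
        ret = -2;
        goto out;          /* error path and success path share the cleanup */
    }

    ret = 0;
out:
    cleanup(&res);
    return ret;
}

int main(void)
{
    printf("%d %d\n", do_work(0), do_work(1));
    return 0;
}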
@@ -3213,7 +3201,7 @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
      * where no further refcount blocks or table clusters are required to
      * reference count every cluster.
      */
-    int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t);
+    int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
     int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
     int64_t table = 0;  /* number of refcount table clusters */
     int64_t blocks = 0; /* number of refcount block clusters */
@@ -3270,8 +3258,8 @@ static int64_t qcow2_calc_prealloc_size(int64_t total_size,
 
     /* total size of L1 tables */
     nl1e = nl2e * l2e_size / cluster_size;
-    nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t));
-    meta_size += nl1e * sizeof(uint64_t);
+    nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE);
+    meta_size += nl1e * L1E_SIZE;
 
     /* total size of refcount table and blocks */
     meta_size += qcow2_refcount_metadata_size(
@@ -3916,7 +3904,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
                    type != QCOW2_SUBCLUSTER_ZERO_PLAIN &&
                    type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
             qemu_co_mutex_unlock(&s->lock);
-            return -ENOTSUP;
+            return ret < 0 ? ret : -ENOTSUP;
         }
     } else {
         qemu_co_mutex_lock(&s->lock);
@@ -4051,10 +4039,9 @@ qcow2_co_copy_range_to(BlockDriverState *bs,
                        BdrvRequestFlags write_flags)
 {
     BDRVQcow2State *s = bs->opaque;
-    int offset_in_cluster;
     int ret;
     unsigned int cur_bytes; /* number of sectors in current iteration */
-    uint64_t cluster_offset;
+    uint64_t host_offset;
     QCowL2Meta *l2meta = NULL;
 
     assert(!bs->encrypted);
@@ -4065,31 +4052,26 @@ qcow2_co_copy_range_to(BlockDriverState *bs,
 
         l2meta = NULL;
 
-        offset_in_cluster = offset_into_cluster(s, dst_offset);
         cur_bytes = MIN(bytes, INT_MAX);
 
         /* TODO:
          * If src->bs == dst->bs, we could simply copy by incrementing
          * the refcnt, without copying user data.
          * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
-        ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes,
-                                         &cluster_offset, &l2meta);
+        ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes,
+                                      &host_offset, &l2meta);
         if (ret < 0) {
             goto fail;
         }
 
-        assert(offset_into_cluster(s, cluster_offset) == 0);
-
-        ret = qcow2_pre_write_overlap_check(bs, 0,
-                cluster_offset + offset_in_cluster, cur_bytes, true);
+        ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes,
+                                            true);
        if (ret < 0) {
            goto fail;
        }
 
        qemu_co_mutex_unlock(&s->lock);
-        ret = bdrv_co_copy_range_to(src, src_offset,
-                                    s->data_file,
-                                    cluster_offset + offset_in_cluster,
+        ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset,
                                     cur_bytes, read_flags, write_flags);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
@@ -4460,7 +4442,7 @@ static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
     /* write updated header.size */
     offset = cpu_to_be64(offset);
     ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
-                           &offset, sizeof(uint64_t));
+                           &offset, sizeof(offset));
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Failed to update the image size");
         goto fail;
@@ -4700,8 +4682,8 @@ static int make_completely_empty(BlockDriverState *bs)
 
     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
 
-    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
-    l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t);
+    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
+    l1_size2 = (uint64_t)s->l1_size * L1E_SIZE;
 
     /* After this call, neither the in-memory nor the on-disk refcount
      * information accurately describe the actual references */
@@ -4747,14 +4729,14 @@ static int make_completely_empty(BlockDriverState *bs)
 
     s->l1_table_offset = 3 * s->cluster_size;
 
-    new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t));
+    new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE);
     if (!new_reftable) {
         ret = -ENOMEM;
         goto fail_broken_refcounts;
     }
 
     s->refcount_table_offset = s->cluster_size;
-    s->refcount_table_size = s->cluster_size / sizeof(uint64_t);
+    s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE;
     s->max_refcount_table_index = 0;
 
     g_free(s->refcount_table);
@@ -4826,7 +4808,7 @@ static int qcow2_make_empty(BlockDriverState *bs)
     int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size);
     int l1_clusters, ret = 0;
 
-    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
+    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
 
     if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps &&
         3 + l1_clusters <= s->refcount_block_size &&
@@ -4957,7 +4939,7 @@ static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
     l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
     l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
                              cluster_size / l2e_size);
-    if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) {
+    if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) {
         error_setg(&local_err, "The image size is too large "
                    "(try using a larger cluster size)");
         goto err;
@@ -99,6 +99,12 @@
 #define L2E_SIZE_NORMAL (sizeof(uint64_t))
 #define L2E_SIZE_EXTENDED (sizeof(uint64_t) * 2)
 
+/* Size of L1 table entries */
+#define L1E_SIZE (sizeof(uint64_t))
+
+/* Size of reftable entries */
+#define REFTABLE_ENTRY_SIZE (sizeof(uint64_t))
+
 #define MIN_CLUSTER_BITS 9
 #define MAX_CLUSTER_BITS 21
 
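Both new constants are still sizeof(uint64_t), so the values computed in the hunks above do not change; the macros only make the intent of the divisions visible. A small worked sketch of the arithmetic that DIV_ROUND_UP(l1_size, cluster_size / L1E_SIZE) performs, with illustrative numbers rather than values from the patch:

    # Entry sizes mirrored from qcow2.h: L1 and reftable entries are 8 bytes each.
    L1E_SIZE = 8
    REFTABLE_ENTRY_SIZE = 8

    def div_round_up(a, b):
        return (a + b - 1) // b

    cluster_size = 65536                                   # 64 KiB, the qcow2 default
    l1_size = 10000                                        # hypothetical L1 entry count

    l1_entries_per_cluster = cluster_size // L1E_SIZE      # 8192 entries per cluster
    l1_clusters = div_round_up(l1_size, l1_entries_per_cluster)   # 2 clusters
    reftable_entries = cluster_size // REFTABLE_ENTRY_SIZE        # 8192 entries

    print(l1_entries_per_cluster, l1_clusters, reftable_entries)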
@@ -855,8 +861,8 @@ int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
 void qcow2_free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size,
                          enum qcow2_discard_type type);
-void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
-                             int nb_clusters, enum qcow2_discard_type type);
+void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
+                            enum qcow2_discard_type type);
 
 int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                                    int64_t l1_table_offset, int l1_size, int addend);

@@ -895,9 +901,9 @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
 int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
                           unsigned int *bytes, uint64_t *host_offset,
                           QCow2SubclusterType *subcluster_type);
-int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
+int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *host_offset,
                             QCowL2Meta **m);
 int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                           uint64_t offset,
                                           int compressed_size,
@@ -29,6 +29,8 @@
 
 #define HASH_LENGTH 32
 
+#define INDEXSTR_LEN 32
+
 #define QUORUM_OPT_VOTE_THRESHOLD "vote-threshold"
 #define QUORUM_OPT_BLKVERIFY "blkverify"
 #define QUORUM_OPT_REWRITE "rewrite-corrupted"

@@ -970,9 +972,9 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
     opened = g_new0(bool, s->num_children);
 
     for (i = 0; i < s->num_children; i++) {
-        char indexstr[32];
-        ret = snprintf(indexstr, 32, "children.%d", i);
-        assert(ret < 32);
+        char indexstr[INDEXSTR_LEN];
+        ret = snprintf(indexstr, INDEXSTR_LEN, "children.%d", i);
+        assert(ret < INDEXSTR_LEN);
 
         s->children[i] = bdrv_open_child(NULL, options, indexstr, bs,
                                          &child_of_bds, BDRV_CHILD_DATA, false,

@@ -1024,7 +1026,7 @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
 {
     BDRVQuorumState *s = bs->opaque;
     BdrvChild *child;
-    char indexstr[32];
+    char indexstr[INDEXSTR_LEN];
     int ret;
 
     if (s->is_blkverify) {

@@ -1039,8 +1041,8 @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
         return;
     }
 
-    ret = snprintf(indexstr, 32, "children.%u", s->next_child_index);
-    if (ret < 0 || ret >= 32) {
+    ret = snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index);
+    if (ret < 0 || ret >= INDEXSTR_LEN) {
         error_setg(errp, "cannot generate child name");
         return;
     }

@@ -1068,6 +1070,7 @@ static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
                              Error **errp)
 {
     BDRVQuorumState *s = bs->opaque;
+    char indexstr[INDEXSTR_LEN];
     int i;
 
     for (i = 0; i < s->num_children; i++) {

@@ -1089,6 +1092,11 @@ static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
     /* We know now that num_children > threshold, so blkverify must be false */
     assert(!s->is_blkverify);
 
+    snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index - 1);
+    if (!strncmp(child->name, indexstr, INDEXSTR_LEN)) {
+        s->next_child_index--;
+    }
+
     bdrv_drained_begin(bs);
 
     /* We can safely remove this child now */
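The quorum changes keep child names stable: names are always generated as children.<n> from next_child_index, and quorum_del_child() now hands the index back when the child being removed is the most recently added one, so a later x-blockdev-change add gets the same name again. A rough Python model of that naming scheme (an illustration of the logic above, not the QMP interface):

    class QuorumChildNames:
        def __init__(self, num_children):
            # Children opened at creation time get fixed names children.0 .. N-1.
            self.names = ["children.%u" % i for i in range(num_children)]
            self.next_child_index = num_children

        def add_child(self):
            name = "children.%u" % self.next_child_index
            self.next_child_index += 1
            self.names.append(name)
            return name

        def del_child(self, name):
            # Mirror of the new strncmp() check: only the most recently
            # generated index is handed back for reuse.
            if name == "children.%u" % (self.next_child_index - 1):
                self.next_child_index -= 1
            self.names.remove(name)

    q = QuorumChildNames(2)          # children.0, children.1
    q.add_child()                    # -> children.2
    q.del_child("children.2")        # index 2 becomes reusable
    print(q.add_child())             # -> children.2 again, i.e. a stable name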
block/rbd.c (43 changed lines)
@@ -341,48 +341,6 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs)
     }
 }
 
-static QemuOptsList runtime_opts = {
-    .name = "rbd",
-    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-    .desc = {
-        {
-            .name = "pool",
-            .type = QEMU_OPT_STRING,
-            .help = "Rados pool name",
-        },
-        {
-            .name = "namespace",
-            .type = QEMU_OPT_STRING,
-            .help = "Rados namespace name in the pool",
-        },
-        {
-            .name = "image",
-            .type = QEMU_OPT_STRING,
-            .help = "Image name in the pool",
-        },
-        {
-            .name = "conf",
-            .type = QEMU_OPT_STRING,
-            .help = "Rados config file location",
-        },
-        {
-            .name = "snapshot",
-            .type = QEMU_OPT_STRING,
-            .help = "Ceph snapshot name",
-        },
-        {
-            /* maps to 'id' in rados_create() */
-            .name = "user",
-            .type = QEMU_OPT_STRING,
-            .help = "Rados id name",
-        },
-        /*
-         * server.* extracted manually, see qemu_rbd_mon_host()
-         */
-        { /* end of list */ }
-    },
-};
-
 /* FIXME Deprecate and remove keypairs or make it available in QMP. */
 static int qemu_rbd_do_create(BlockdevCreateOptions *options,
                               const char *keypairs, const char *password_secret,
@@ -1289,6 +1247,7 @@ static QemuOptsList qemu_rbd_create_opts = {
 
 static const char *const qemu_rbd_strong_runtime_opts[] = {
     "pool",
+    "namespace",
     "image",
     "conf",
     "snapshot",
@@ -816,9 +816,9 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
         goto exit;
     }
 
-    /* only 2 supported sector sizes */
-    if (s->logical_sector_size != 512 && s->logical_sector_size != 4096) {
-        ret = -EINVAL;
+    /* Currently we only support 512 */
+    if (s->logical_sector_size != 512) {
+        ret = -ENOTSUP;
         goto exit;
     }
 
qemu-img.c (32 changed lines)
@@ -1201,10 +1201,10 @@ static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum,
             *pnum = 0;
             return 0;
         }
-        is_zero = buffer_is_zero(buf, 512);
+        is_zero = buffer_is_zero(buf, BDRV_SECTOR_SIZE);
         for(i = 1; i < n; i++) {
-            buf += 512;
-            if (is_zero != buffer_is_zero(buf, 512)) {
+            buf += BDRV_SECTOR_SIZE;
+            if (is_zero != buffer_is_zero(buf, BDRV_SECTOR_SIZE)) {
                 break;
             }
         }
@@ -1666,6 +1666,7 @@ enum ImgConvertBlockStatus {
 typedef struct ImgConvertState {
     BlockBackend **src;
     int64_t *src_sectors;
+    int *src_alignment;
     int src_num;
     int64_t total_sectors;
     int64_t allocated_sectors;

@@ -1732,6 +1733,7 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
     if (s->sector_next_status <= sector_num) {
         uint64_t offset = (sector_num - src_cur_offset) * BDRV_SECTOR_SIZE;
         int64_t count;
+        int tail;
         BlockDriverState *src_bs = blk_bs(s->src[src_cur]);
         BlockDriverState *base;
 

@@ -1772,6 +1774,16 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
 
         n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
 
+        /*
+         * Avoid that s->sector_next_status becomes unaligned to the source
+         * request alignment and/or cluster size to avoid unnecessary read
+         * cycles.
+         */
+        tail = (sector_num - src_cur_offset + n) % s->src_alignment[src_cur];
+        if (n > tail) {
+            n -= tail;
+        }
+
         if (ret & BDRV_BLOCK_ZERO) {
             s->status = post_backing_zero ? BLK_BACKING_FILE : BLK_ZERO;
         } else if (ret & BDRV_BLOCK_DATA) {
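The new tail computation trims each status chunk so that it ends on the source's alignment boundary, which keeps s->sector_next_status aligned for the next query. A worked example of the modulo arithmetic with made-up numbers:

    sector_num, src_cur_offset, n = 200, 0, 250   # chunk of 250 sectors at sector 200
    src_alignment = 128                           # e.g. 64 KiB clusters = 128 sectors

    tail = (sector_num - src_cur_offset + n) % src_alignment    # 450 % 128 = 66
    if n > tail:
        n -= tail                                               # 250 - 66 = 184
    # The chunk now ends at sector 200 + 184 = 384, a multiple of 128, so the
    # following block-status query starts on a cluster boundary.
    print(n)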
@@ -2410,8 +2422,10 @@ static int img_convert(int argc, char **argv)
 
     s.src = g_new0(BlockBackend *, s.src_num);
     s.src_sectors = g_new(int64_t, s.src_num);
+    s.src_alignment = g_new(int, s.src_num);
 
     for (bs_i = 0; bs_i < s.src_num; bs_i++) {
+        BlockDriverState *src_bs;
         s.src[bs_i] = img_open(image_opts, argv[optind + bs_i],
                                fmt, src_flags, src_writethrough, s.quiet,
                                force_share);

@@ -2426,6 +2440,13 @@
             ret = -1;
             goto out;
         }
+        src_bs = blk_bs(s.src[bs_i]);
+        s.src_alignment[bs_i] = DIV_ROUND_UP(src_bs->bl.request_alignment,
+                                             BDRV_SECTOR_SIZE);
+        if (!bdrv_get_info(src_bs, &bdi)) {
+            s.src_alignment[bs_i] = MAX(s.src_alignment[bs_i],
+                                        bdi.cluster_size / BDRV_SECTOR_SIZE);
+        }
         s.total_sectors += s.src_sectors[bs_i];
     }
 
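The per-source alignment stored above is the driver's request alignment rounded up to sectors, raised to the cluster size when the format reports one. A quick sketch with assumed values (a 4 KiB request alignment and 64 KiB clusters):

    BDRV_SECTOR_SIZE = 512

    def div_round_up(a, b):
        return (a + b - 1) // b

    request_alignment = 4096                      # assumed protocol alignment
    cluster_size = 65536                          # assumed image cluster size

    alignment = div_round_up(request_alignment, BDRV_SECTOR_SIZE)   # 8 sectors
    alignment = max(alignment, cluster_size // BDRV_SECTOR_SIZE)    # 128 sectors
    print(alignment)   # qemu-img convert then keeps requests on 64 KiB boundaries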
@@ -2492,8 +2513,8 @@
         }
     }
 
-    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s.total_sectors * 512,
-                        &error_abort);
+    qemu_opt_set_number(opts, BLOCK_OPT_SIZE,
+                        s.total_sectors * BDRV_SECTOR_SIZE, &error_abort);
     ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL);
     if (ret < 0) {
         goto out;

@@ -2708,6 +2729,7 @@ out:
         g_free(s.src);
     }
     g_free(s.src_sectors);
+    g_free(s.src_alignment);
 fail_getopt:
     g_free(options);
 
@@ -55,6 +55,9 @@ fi
 
 cd tests/qemu-iotests
 
+# QEMU_CHECK_BLOCK_AUTO is used to disable some unstable sub-tests
+export QEMU_CHECK_BLOCK_AUTO=1
+
 ret=0
 for fmt in $format_list ; do
     ./check -makecheck -$fmt $group || ret=1
@@ -21,6 +21,7 @@
 import time
 import os
 import iotests
+import unittest
 from iotests import qemu_img, qemu_io
 
 backing_img = os.path.join(iotests.test_dir, 'backing.img')

@@ -228,6 +229,7 @@ class TestParallelOps(iotests.QMPTestCase):
 
     # Test that it's possible to run several block-stream operations
     # in parallel in the same snapshot chain
    @unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
     def test_stream_parallel(self):
         self.assert_no_active_block_jobs()
 
@@ -43,6 +43,10 @@ get_image_size_on_host()
 
 _supported_fmt qcow2
 _supported_proto file
+# Growing a file with a backing file (without preallocation=full or
+# =falloc) requires zeroing the newly added area, which is impossible
+# to do quickly for v2 images, and hence is unsupported.
+_unsupported_imgopts 'compat=0.10'
 
 if [ -z "$TEST_IMG_FILE" ]; then
     TEST_IMG_FILE=$TEST_IMG

@@ -168,24 +172,28 @@ done
 $QEMU_IMG create -f raw "$TEST_IMG.base" 128k | _filter_img_create
 $QEMU_IO -c 'write -q -P 1 0 128k' -f raw "$TEST_IMG.base"
 for orig_size in 31k 33k; do
-    echo "--- Resizing image from $orig_size to 96k ---"
-    _make_test_img -F raw -b "$TEST_IMG.base" -o cluster_size=64k "$orig_size"
-    $QEMU_IMG resize -f "$IMGFMT" --preallocation=full "$TEST_IMG" 96k
-    # The first part of the image should contain data from the backing file
-    $QEMU_IO -c "read -q -P 1 0 ${orig_size}" "$TEST_IMG"
-    # The resized part of the image should contain zeroes
-    $QEMU_IO -c "read -q -P 0 ${orig_size} 63k" "$TEST_IMG"
-    # If the image does not have an external data file we can also verify its
-    # actual size. The resized image should have 7 clusters:
-    # header, L1 table, L2 table, refcount table, refcount block, 2 data clusters
-    if ! _get_data_file "$TEST_IMG" > /dev/null; then
-        expected_file_length=$((65536 * 7))
-        file_length=$(stat -c '%s' "$TEST_IMG_FILE")
-        if [ "$file_length" != "$expected_file_length" ]; then
-            echo "ERROR: file length $file_length (expected $expected_file_length)"
-        fi
-    fi
-    echo
+    for dst_size in 96k 128k; do
+        for prealloc in metadata full; do
+            echo "--- Resizing image from $orig_size to $dst_size (preallocation=$prealloc) ---"
+            _make_test_img -F raw -b "$TEST_IMG.base" -o cluster_size=64k "$orig_size"
+            $QEMU_IMG resize -f "$IMGFMT" --preallocation="$prealloc" "$TEST_IMG" "$dst_size"
+            # The first part of the image should contain data from the backing file
+            $QEMU_IO -c "read -q -P 1 0 ${orig_size}" "$TEST_IMG"
+            # The resized part of the image should contain zeroes
+            $QEMU_IO -c "read -q -P 0 ${orig_size} 63k" "$TEST_IMG"
+            # If the image does not have an external data file we can also verify its
+            # actual size. The resized image should have 7 clusters:
+            # header, L1 table, L2 table, refcount table, refcount block, 2 data clusters
+            if ! _get_data_file "$TEST_IMG" > /dev/null; then
+                expected_file_length=$((65536 * 7))
+                file_length=$(stat -c '%s' "$TEST_IMG_FILE")
+                if [ "$file_length" != "$expected_file_length" ]; then
+                    echo "ERROR: file length $file_length (expected $expected_file_length)"
+                fi
+            fi
+            echo
+        done
+    done
 done
 
 # success, all done
@@ -768,11 +768,35 @@ wrote 81920/81920 bytes at offset 2048000
 80 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 
 Formatting 'TEST_DIR/t.IMGFMT.base', fmt=raw size=131072
---- Resizing image from 31k to 96k ---
+--- Resizing image from 31k to 96k (preallocation=metadata) ---
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
 Image resized.
 
---- Resizing image from 33k to 96k ---
+--- Resizing image from 31k to 96k (preallocation=full) ---
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
+Image resized.
+
+--- Resizing image from 31k to 128k (preallocation=metadata) ---
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
+Image resized.
+
+--- Resizing image from 31k to 128k (preallocation=full) ---
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
+Image resized.
+
+--- Resizing image from 33k to 96k (preallocation=metadata) ---
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
+Image resized.
+
+--- Resizing image from 33k to 96k (preallocation=full) ---
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
+Image resized.
+
+--- Resizing image from 33k to 128k (preallocation=metadata) ---
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
+Image resized.
+
+--- Resizing image from 33k to 128k (preallocation=full) ---
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
 Image resized.
 
@@ -26,8 +26,8 @@ iotests.script_initialize(supported_fmts=['qcow2', 'qed', 'raw'],
 
 with iotests.FilePath('source.img') as source_img_path, \
      iotests.FilePath('dest.img') as dest_img_path, \
-     iotests.FilePaths(['migration.sock', 'nbd.sock'], iotests.sock_dir) as \
-        [migration_sock_path, nbd_sock_path], \
+     iotests.FilePath('migration.sock', 'nbd.sock', base_dir=iotests.sock_dir) \
+        as (migration_sock_path, nbd_sock_path), \
      iotests.VM('source') as source_vm, \
      iotests.VM('dest') as dest_vm:
 
@@ -26,7 +26,7 @@ iotests.script_initialize(supported_fmts=['generic'])
 
 with iotests.FilePath('disk.img') as disk_img_path, \
      iotests.FilePath('disk-snapshot.img') as disk_snapshot_img_path, \
-     iotests.FilePath('nbd.sock', iotests.sock_dir) as nbd_sock_path, \
+     iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock_path, \
      iotests.VM() as vm:
 
     img_size = '10M'
@@ -49,7 +49,7 @@ remainder = [("0xd5", "0x108000", "32k"), # Right-end of partial-left [1]
 
 with iotests.FilePath('base.img') as base_img_path, \
      iotests.FilePath('fleece.img') as fleece_img_path, \
-     iotests.FilePath('nbd.sock', iotests.sock_dir) as nbd_sock_path, \
+     iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock_path, \
      iotests.VM() as vm:
 
     log('--- Setting up images ---')
@@ -46,8 +46,11 @@ if [ "$IMGOPTSSYNTAX" = "true" ]; then
     # We use json:{} filenames here, so we cannot work with additional options.
     _unsupported_fmt $IMGFMT
 else
-    # With VDI, the output is ordered differently. Just disable it.
-    _unsupported_fmt vdi
+    # - With VDI, the output is ordered differently. Just disable it.
+    # - VHDX has large clusters; because qemu-img convert tries to
+    #   align the requests to the cluster size, the output is ordered
+    #   differently, so disable it, too.
+    _unsupported_fmt vdi vhdx
 fi
 
 
@@ -275,10 +275,9 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
                    an incomplete backup. Testing limitations prevent
                    testing competing writes.
     """
-    with iotests.FilePaths(['img', 'bsync1', 'bsync2',
-                            'fbackup0', 'fbackup1', 'fbackup2']) as \
-         (img_path, bsync1, bsync2,
-          fbackup0, fbackup1, fbackup2), \
+    with iotests.FilePath(
+            'img', 'bsync1', 'bsync2', 'fbackup0', 'fbackup1', 'fbackup2') as \
+            (img_path, bsync1, bsync2, fbackup0, fbackup1, fbackup2), \
          iotests.VM() as vm:
 
         mode = "Mode {:s}; Bitmap Sync {:s}".format(msync_mode, bsync_mode)

@@ -441,8 +440,7 @@ def test_backup_api():
     """
     Test malformed and prohibited invocations of the backup API.
     """
-    with iotests.FilePaths(['img', 'bsync1']) as \
-         (img_path, backup_path), \
+    with iotests.FilePath('img', 'bsync1') as (img_path, backup_path), \
          iotests.VM() as vm:
 
         log("\n=== API failure tests ===\n")
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+#
+# Test the handling of errors in write requests with multiple allocations
+#
+# Copyright (C) 2020 Igalia, S.L.
+# Author: Alberto Garcia <berto@igalia.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=berto@igalia.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+    _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qcow2
+_supported_proto file
+_supported_os Linux
+_unsupported_imgopts cluster_size refcount_bits extended_l2 compat=0.10 data_file
+
+echo '### Create the image'
+_make_test_img -o refcount_bits=64,cluster_size=1k 1M
+
+# The reference counts of the clusters for the first 123k of this
+# write request are stored in the first refcount block. The last
+# cluster (guest offset 123k) is referenced in the second refcount
+# block.
+echo '### Fill the first refcount block and one data cluster from the second'
+$QEMU_IO -c 'write 0 124k' "$TEST_IMG" | _filter_qemu_io
+
+echo '### Discard two of the last data clusters, leave one in the middle'
+$QEMU_IO -c 'discard 121k 1k' "$TEST_IMG" | _filter_qemu_io
+$QEMU_IO -c 'discard 123k 1k' "$TEST_IMG" | _filter_qemu_io
+
+echo '### Corrupt the offset of the second refcount block'
+refcount_table_offset=$(peek_file_be "$TEST_IMG" 48 8)
+poke_file "$TEST_IMG" $(($refcount_table_offset+14)) "\x06"
+
+# This tries to allocate the two clusters discarded earlier (guest
+# offsets 121k and 123k). Their reference counts are in the first and
+# second refcount blocks respectively, but only the first one can be
+# allocated correctly because the second entry of the refcount table
+# is corrupted.
+echo '### Try to allocate the discarded clusters again'
+$QEMU_IO -c 'write 121k 3k' "$TEST_IMG" | _filter_qemu_io
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
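The test's comments depend on how many host clusters a single refcount block can describe with the chosen options. A short sketch of that arithmetic; the count of metadata clusters is taken from the test's own comments rather than verified here:

    cluster_size = 1024          # -o cluster_size=1k
    refcount_bits = 64           # -o refcount_bits=64

    entries_per_refblock = cluster_size * 8 // refcount_bits    # 128 host clusters
    # Per the comments above, the image header and the initial metadata tables
    # occupy the first few host clusters, so the cluster at guest offset 122k is
    # the last one covered by the first refcount block and the cluster at 123k
    # already needs the second, corrupted one.
    print(entries_per_refblock)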
@@ -0,0 +1,16 @@
+QA output created by 305
+### Create the image
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576
+### Fill the first refcount block and one data cluster from the second
+wrote 126976/126976 bytes at offset 0
+124 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+### Discard two of the last data clusters, leave one in the middle
+discard 1024/1024 bytes at offset 123904
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+discard 1024/1024 bytes at offset 125952
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+### Corrupt the offset of the second refcount block
+### Try to allocate the discarded clusters again
+qcow2: Marking image as corrupt: Refblock offset 0x20600 unaligned (reftable index: 0x1); further corruption events will be suppressed
+write failed: Input/output error
+*** done
@@ -313,3 +313,4 @@
 302 quick
 303 rw quick
 304 rw quick
+305 rw quick
@@ -448,42 +448,45 @@ class Timeout:
 def file_pattern(name):
     return "{0}-{1}".format(os.getpid(), name)
 
-class FilePaths:
+class FilePath:
     """
-    FilePaths is an auto-generated filename that cleans itself up.
+    Context manager generating multiple file names. The generated files are
+    removed when exiting the context.
 
-    Use this context manager to generate filenames and ensure that the file
-    gets deleted::
+    Example usage:
 
-        with FilePaths(['test.img']) as img_path:
-            qemu_img('create', img_path, '1G')
-        # migration_sock_path is automatically deleted
+        with FilePath('a.img', 'b.img') as (img_a, img_b):
+            # Use img_a and img_b here...
+
+        # a.img and b.img are automatically removed here.
+
+    By default images are created in iotests.test_dir. To create sockets use
+    iotests.sock_dir:
+
+        with FilePath('a.sock', base_dir=iotests.sock_dir) as sock:
+
+    For convenience, calling with one argument yields a single file instead of
+    a tuple with one item.
     """
-    def __init__(self, names, base_dir=test_dir):
-        self.paths = []
-        for name in names:
-            self.paths.append(os.path.join(base_dir, file_pattern(name)))
+    def __init__(self, *names, base_dir=test_dir):
+        self.paths = [os.path.join(base_dir, file_pattern(name))
+                      for name in names]
 
     def __enter__(self):
-        return self.paths
+        if len(self.paths) == 1:
+            return self.paths[0]
+        else:
+            return self.paths
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        try:
-            for path in self.paths:
+        for path in self.paths:
+            try:
                 os.remove(path)
-        except OSError:
-            pass
+            except OSError:
+                pass
         return False
 
-class FilePath(FilePaths):
-    """
-    FilePath is a specialization of FilePaths that takes a single filename.
-    """
-    def __init__(self, name, base_dir=test_dir):
-        super(FilePath, self).__init__([name], base_dir)
-
-    def __enter__(self):
-        return self.paths[0]
-
 
 def file_path_remover():
     for path in reversed(file_path_remover.paths):
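The tests updated in this series use the merged context manager roughly as follows; a condensed sketch (file names and the image size are illustrative):

    import iotests
    from iotests import qemu_img

    with iotests.FilePath('disk.img') as disk_img_path, \
         iotests.FilePath('a.sock', 'b.sock', base_dir=iotests.sock_dir) \
            as (sock_a, sock_b), \
         iotests.VM() as vm:
        qemu_img('create', '-f', iotests.imgfmt, disk_img_path, '10M')
        # ... drive the VM against disk_img_path and the sockets here ...
    # disk.img and both sockets are removed when the with block exits.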