qcow2: Add l2_entry_size()

qcow2 images with subclusters have 128-bit L2 entries. The first 64
bits contain the same information as traditional images and the last
64 bits form a bitmap with the status of each individual subcluster.

Because of that we cannot assume that L2 entries are sizeof(uint64_t)
anymore. This function returns the proper value for the image.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <d34d578bd0380e739e2dde3e8dd6187d3d249fa9.1594396418.git.berto@igalia.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
Author: Alberto Garcia, 2020-07-10 18:12:54 +02:00 (committed by Max Reitz)
Commit: c8fd8554d9 (parent: 3e71981592)
4 changed files with 27 additions and 16 deletions
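
Everything in the patch follows from one substitution: sizes and offsets that used to be computed with a fixed sizeof(uint64_t) per L2 entry are now computed with l2_entry_size(s). As a minimal sketch of the resulting arithmetic (illustration only, not part of the patch; it assumes `s` is the BDRVQcow2State of an open image, and the numbers in the comments assume a 64 KiB cluster size):

    /* With 64 KiB clusters, a traditional image holds 65536 / 8 = 8192
     * entries per L2 table and an extended-L2 image 65536 / 16 = 4096,
     * which is why qcow2_do_open() below derives s->l2_bits with ctz32()
     * instead of subtracting a constant 3. */
    size_t entry_size = l2_entry_size(s);                 /* 8 or 16 bytes */
    int l2_bits = s->cluster_bits - ctz32(entry_size);    /* 16 - 3 or 16 - 4 */
    int l2_size = 1 << l2_bits;                           /* entries per L2 table */
    size_t l2_table_bytes = (size_t)l2_size * entry_size; /* always one cluster */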

block/qcow2-cluster.c

@@ -208,7 +208,7 @@ static int l2_load(BlockDriverState *bs, uint64_t offset,
                    uint64_t l2_offset, uint64_t **l2_slice)
 {
     BDRVQcow2State *s = bs->opaque;
-    int start_of_slice = sizeof(uint64_t) *
+    int start_of_slice = l2_entry_size(s) *
         (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));
 
     return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
@@ -281,7 +281,7 @@ static int l2_allocate(BlockDriverState *bs, int l1_index)
 
     /* allocate a new l2 entry */
 
-    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
+    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * l2_entry_size(s));
     if (l2_offset < 0) {
         ret = l2_offset;
         goto fail;
@@ -305,7 +305,7 @@ static int l2_allocate(BlockDriverState *bs, int l1_index)
 
     /* allocate a new entry in the l2 cache */
 
-    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+    slice_size2 = s->l2_slice_size * l2_entry_size(s);
     n_slices = s->cluster_size / slice_size2;
 
     trace_qcow2_l2_allocate_get_empty(bs, l1_index);
@@ -369,7 +369,7 @@ fail:
     }
     s->l1_table[l1_index] = old_l2_offset;
     if (l2_offset > 0) {
-        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
+        qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                             QCOW2_DISCARD_ALWAYS);
     }
     return ret;
@@ -717,7 +717,7 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
 
         /* Then decrease the refcount of the old table */
         if (l2_offset) {
-            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
+            qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                                 QCOW2_DISCARD_OTHER);
         }
 
@@ -1921,7 +1921,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
     int ret;
     int i, j;
 
-    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+    slice_size2 = s->l2_slice_size * l2_entry_size(s);
     n_slices = s->cluster_size / slice_size2;
 
     if (!is_active_l1) {

block/qcow2-refcount.c

@@ -1254,7 +1254,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
     l2_slice = NULL;
     l1_table = NULL;
     l1_size2 = l1_size * sizeof(uint64_t);
-    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+    slice_size2 = s->l2_slice_size * l2_entry_size(s);
     n_slices = s->cluster_size / slice_size2;
 
     s->cache_discards = true;
@@ -1605,7 +1605,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
     int i, l2_size, nb_csectors, ret;
 
     /* Read L2 table from disk */
-    l2_size = s->l2_size * sizeof(uint64_t);
+    l2_size = s->l2_size * l2_entry_size(s);
     l2_table = g_malloc(l2_size);
 
     ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
@@ -1680,15 +1680,16 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                         fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
                         offset);
                 if (fix & BDRV_FIX_ERRORS) {
+                    int idx = i * (l2_entry_size(s) / sizeof(uint64_t));
                     uint64_t l2e_offset =
-                        l2_offset + (uint64_t)i * sizeof(uint64_t);
+                        l2_offset + (uint64_t)i * l2_entry_size(s);
                     int ign = active ? QCOW2_OL_ACTIVE_L2 :
                                        QCOW2_OL_INACTIVE_L2;
 
                     l2_entry = QCOW_OFLAG_ZERO;
                     set_l2_entry(s, l2_table, i, l2_entry);
                     ret = qcow2_pre_write_overlap_check(bs, ign,
-                            l2e_offset, sizeof(uint64_t), false);
+                            l2e_offset, l2_entry_size(s), false);
                     if (ret < 0) {
                         fprintf(stderr, "ERROR: Overlap check failed\n");
                         res->check_errors++;
@@ -1698,7 +1699,8 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                     }
 
                     ret = bdrv_pwrite_sync(bs->file, l2e_offset,
-                                           &l2_table[i], sizeof(uint64_t));
+                                           &l2_table[idx],
+                                           l2_entry_size(s));
                     if (ret < 0) {
                         fprintf(stderr, "ERROR: Failed to overwrite L2 "
                                 "table entry: %s\n", strerror(-ret));
@@ -1905,7 +1907,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
         }
 
         ret = bdrv_pread(bs->file, l2_offset, l2_table,
-                         s->l2_size * sizeof(uint64_t));
+                         s->l2_size * l2_entry_size(s));
         if (ret < 0) {
             fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                     strerror(-ret));
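
A note on the check_refcounts_l2() repair hunks above: l2_table is read into a plain uint64_t buffer, so with extended entries each L2 entry occupies two 64-bit words. The new idx variable converts the entry index i into a word index, and the rewrite now covers the whole entry, both the descriptor and its subcluster bitmap. Roughly (illustration only, reusing s, i and l2_table from the hunk):

    size_t entry_words = l2_entry_size(s) / sizeof(uint64_t); /* 1 or 2 */
    int idx = i * entry_words;       /* first word of entry i in l2_table */
    void *entry = &l2_table[idx];    /* l2_entry_size(s) bytes are written back */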

block/qcow2.c

@@ -883,7 +883,7 @@ static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
     uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
     /* An L2 table is always one cluster in size so the max cache size
      * should be a multiple of the cluster size. */
-    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * sizeof(uint64_t),
+    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
                                      s->cluster_size);
 
     combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
@@ -1042,7 +1042,7 @@ static int qcow2_update_options_prepare(BlockDriverState *bs,
         }
     }
 
-    r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t);
+    r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
     r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
                                            l2_cache_entry_size);
     r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
@@ -1489,7 +1489,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
         bs->encrypted = true;
     }
 
-    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
+    s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
     s->l2_size = 1 << s->l2_bits;
     /* 2^(s->refcount_order - 3) is the refcount width in bytes */
     s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
@@ -4238,7 +4238,7 @@ static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
          * preallocation. All that matters is that we will not have to allocate
          * new refcount structures for them.) */
         nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
-                                        s->cluster_size / sizeof(uint64_t));
+                                        s->cluster_size / l2_entry_size(s));
         /* The cluster range may not be aligned to L2 boundaries, so add one L2
          * table for a potential head/tail */
         nb_new_l2_tables++;
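
One practical consequence of the qcow2.c changes: for a given L2 cache entry (slice) size, a slice of an extended-L2 image holds half as many L2 entries, and since each entry still covers one cluster, it also maps half as much guest data. read_cache_sizes() accounts for this by letting max_l2_cache grow to max_l2_entries * l2_entry_size(s), i.e. twice as large as before for images with subclusters.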

block/qcow2.h

@@ -80,6 +80,10 @@
 #define QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER 32
 
+/* Size of normal and extended L2 entries */
+#define L2E_SIZE_NORMAL   (sizeof(uint64_t))
+#define L2E_SIZE_EXTENDED (sizeof(uint64_t) * 2)
+
 #define MIN_CLUSTER_BITS 9
 #define MAX_CLUSTER_BITS 21
@@ -521,6 +525,11 @@ static inline bool has_subclusters(BDRVQcow2State *s)
     return false;
 }
 
+static inline size_t l2_entry_size(BDRVQcow2State *s)
+{
+    return has_subclusters(s) ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
+}
+
 static inline uint64_t get_l2_entry(BDRVQcow2State *s, uint64_t *l2_slice,
                                     int idx)
 {
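
Note that, as the context of the last hunk shows, has_subclusters() still returns false at this point in the series, so l2_entry_size() evaluates to L2E_SIZE_NORMAL (8 bytes) for every image; the 16-byte L2E_SIZE_EXTENDED path only becomes reachable once subcluster support is actually enabled for images with extended L2 entries.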