mirror of https://github.com/xemu-project/xemu.git
Merge tag 'v1.7.2' into xbox
This commit is contained in: commit ff304138d7

arch_init.c (92)
@@ -857,64 +857,60 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
     ram_addr_t addr;
     int flags, ret = 0;
     int error;
     static uint64_t seq_iter;
 
     seq_iter++;
 
-    if (version_id < 4 || version_id > 4) {
+    if (version_id != 4) {
         return -EINVAL;
     }
 
-    do {
+    while (!ret) {
         addr = qemu_get_be64(f);
 
         flags = addr & ~TARGET_PAGE_MASK;
         addr &= TARGET_PAGE_MASK;
 
         if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
-            if (version_id == 4) {
-                /* Synchronize RAM block list */
-                char id[256];
-                ram_addr_t length;
-                ram_addr_t total_ram_bytes = addr;
-
-                while (total_ram_bytes) {
-                    RAMBlock *block;
-                    uint8_t len;
-
-                    len = qemu_get_byte(f);
-                    qemu_get_buffer(f, (uint8_t *)id, len);
-                    id[len] = 0;
-                    length = qemu_get_be64(f);
-
-                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-                        if (!strncmp(id, block->idstr, sizeof(id))) {
-                            if (block->length != length) {
-                                fprintf(stderr,
-                                        "Length mismatch: %s: " RAM_ADDR_FMT
-                                        " in != " RAM_ADDR_FMT "\n", id, length,
-                                        block->length);
-                                ret = -EINVAL;
-                                goto done;
-                            }
-                            break;
-                        }
-                    }
-
-                    if (!block) {
-                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
-                                "accept migration\n", id);
-                        ret = -EINVAL;
-                        goto done;
-                    }
-
-                    total_ram_bytes -= length;
-                }
-            }
-        }
-
-        if (flags & RAM_SAVE_FLAG_COMPRESS) {
+            /* Synchronize RAM block list */
+            char id[256];
+            ram_addr_t length;
+            ram_addr_t total_ram_bytes = addr;
+
+            while (total_ram_bytes) {
+                RAMBlock *block;
+                uint8_t len;
+
+                len = qemu_get_byte(f);
+                qemu_get_buffer(f, (uint8_t *)id, len);
+                id[len] = 0;
+                length = qemu_get_be64(f);
+
+                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+                    if (!strncmp(id, block->idstr, sizeof(id))) {
+                        if (block->length != length) {
+                            fprintf(stderr,
+                                    "Length mismatch: %s: " RAM_ADDR_FMT
+                                    " in != " RAM_ADDR_FMT "\n", id, length,
+                                    block->length);
+                            ret = -EINVAL;
+                        }
+                        break;
+                    }
+                }
+
+                if (!block) {
+                    fprintf(stderr, "Unknown ramblock \"%s\", cannot "
+                            "accept migration\n", id);
+                    ret = -EINVAL;
+                }
+                if (ret) {
+                    break;
+                }
+
+                total_ram_bytes -= length;
+            }
+        } else if (flags & RAM_SAVE_FLAG_COMPRESS) {
             void *host;
             uint8_t ch;

@@ -941,20 +937,24 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             }
 
             if (load_xbzrle(f, addr, host) < 0) {
                 error_report("Failed to decompress XBZRLE page at "
                              RAM_ADDR_FMT, addr);
                 ret = -EINVAL;
-                goto done;
+                break;
             }
         } else if (flags & RAM_SAVE_FLAG_HOOK) {
             ram_control_load_hook(f, flags);
+        } else if (flags & RAM_SAVE_FLAG_EOS) {
+            /* normal exit */
+            break;
+        } else {
+            error_report("Unknown migration flags: %#x", flags);
+            ret = -EINVAL;
+            break;
         }
-        error = qemu_file_get_error(f);
-        if (error) {
-            ret = error;
-            goto done;
-        }
-    } while (!(flags & RAM_SAVE_FLAG_EOS));
+        ret = qemu_file_get_error(f);
+    }
 
-done:
     DPRINTF("Completed load of VM with exit code %d seq iteration "
             "%" PRIu64 "\n", ret, seq_iter);
     return ret;
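A quick illustration of the control-flow change above (a minimal standalone sketch, not QEMU code): error paths stop jumping to a done: label and instead set ret and break, so the while (!ret) loop has a single fall-through exit. parse_record(), RECORD_DATA and RECORD_EOS are hypothetical stand-ins for the migration stream details.

#include <stdio.h>
#include <errno.h>

#define RECORD_EOS  0   /* hypothetical end-of-stream marker */
#define RECORD_DATA 1

static int parse_record(int flags)
{
    return flags == RECORD_DATA ? 0 : -EINVAL; /* stand-in for real parsing */
}

static int load_stream(const int *flags, int n)
{
    int ret = 0;
    int i = 0;

    while (!ret) {               /* loop exits as soon as ret is set */
        int f = (i < n) ? flags[i++] : RECORD_EOS;
        if (f == RECORD_EOS) {
            break;               /* normal exit, ret stays 0 */
        }
        ret = parse_record(f);   /* any error falls out of the loop */
    }
    return ret;
}

int main(void)
{
    const int good[] = { RECORD_DATA, RECORD_DATA };
    const int bad[] = { RECORD_DATA, 42 };
    printf("good stream: %d\n", load_stream(good, 2));
    printf("bad stream:  %d\n", load_stream(bad, 2));
    return 0;
}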
async.c (14)

@@ -117,15 +117,21 @@ void qemu_bh_schedule_idle(QEMUBH *bh)
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
+    AioContext *ctx;
+
     if (bh->scheduled)
         return;
+    ctx = bh->ctx;
     bh->idle = 0;
-    /* Make sure that idle & any writes needed by the callback are done
-     * before the locations are read in the aio_bh_poll.
+    /* Make sure that:
+     * 1. idle & any writes needed by the callback are done before the
+     *    locations are read in the aio_bh_poll.
+     * 2. ctx is loaded before scheduled is set and the callback has a chance
+     *    to execute.
      */
-    smp_wmb();
+    smp_mb();
     bh->scheduled = 1;
-    aio_notify(bh->ctx);
+    aio_notify(ctx);
 }
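The comment added in the hunk above carries the whole reasoning: once bh->scheduled is set, the callback may run on another thread and delete the BH, so bh->ctx must be read into a local first, and a full barrier must order that load against the store. A minimal sketch of the same shape, using a C11 fence in place of QEMU's smp_mb() (an assumption, not the QEMU implementation):

#include <stdatomic.h>
#include <stdio.h>

typedef struct AioContext { const char *name; } AioContext;

typedef struct QEMUBH {
    AioContext *ctx;
    int scheduled;
    int idle;
} QEMUBH;

static void aio_notify(AioContext *ctx)
{
    printf("notify %s\n", ctx->name);
}

static void bh_schedule_sketch(QEMUBH *bh)
{
    AioContext *ctx;

    if (bh->scheduled) {
        return;
    }
    ctx = bh->ctx;                 /* load ctx while bh is still valid */
    bh->idle = 0;
    /* full barrier, standing in for smp_mb(): orders the ctx load and the
     * idle store before the scheduled store becomes visible */
    atomic_thread_fence(memory_order_seq_cst);
    bh->scheduled = 1;             /* from here on, bh may be deleted */
    aio_notify(ctx);               /* must use the local copy, not bh->ctx */
}

int main(void)
{
    AioContext ctx = { "main-loop" };
    QEMUBH bh = { &ctx, 0, 1 };
    bh_schedule_sketch(&bh);
    return 0;
}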
block.c (40)

@@ -966,14 +966,14 @@ fail:
  */
 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
 {
-    char backing_filename[PATH_MAX];
-    int back_flags, ret;
+    char *backing_filename = g_malloc0(PATH_MAX);
+    int back_flags, ret = 0;
     BlockDriver *back_drv = NULL;
     Error *local_err = NULL;
 
     if (bs->backing_hd != NULL) {
         QDECREF(options);
-        return 0;
+        goto free_exit;
     }
 
     /* NULL means an empty set of options */

@@ -986,10 +986,9 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
         backing_filename[0] = '\0';
     } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
         QDECREF(options);
-        return 0;
+        goto free_exit;
     } else {
-        bdrv_get_full_backing_filename(bs, backing_filename,
-                                       sizeof(backing_filename));
+        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
     }
 
     bs->backing_hd = bdrv_new("");

@@ -1012,11 +1011,14 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
         error_setg(errp, "Could not open backing file: %s",
                    error_get_pretty(local_err));
         error_free(local_err);
-        return ret;
+        goto free_exit;
     }
     pstrcpy(bs->backing_file, sizeof(bs->backing_file),
             bs->backing_hd->file->filename);
-    return 0;
+    ret = 0;
+free_exit:
+    g_free(backing_filename);
+    return ret;
 }
 
 /*

@@ -1032,7 +1034,8 @@ int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
 {
     int ret;
     /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
-    char tmp_filename[PATH_MAX + 1];
+    char *backing_filename = NULL;
+    char *tmp_filename = g_malloc0(PATH_MAX + 1);
     BlockDriverState *file = NULL;
     QDict *file_options = NULL;
     const char *drvname;

@@ -1052,7 +1055,7 @@ int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
         int64_t total_size;
         BlockDriver *bdrv_qcow2;
         QEMUOptionParameter *create_options;
-        char backing_filename[PATH_MAX];
+        backing_filename = g_malloc0(PATH_MAX);
 
         if (qdict_size(options) != 0) {
             error_setg(errp, "Can't use snapshot=on with driver-specific options");

@@ -1064,9 +1067,9 @@ int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
         /* if snapshot, we create a temporary backing file and open it
            instead of opening 'filename' directly */
 
-        /* if there is a backing file, use it */
         bs1 = bdrv_new("");
-        ret = bdrv_open(bs1, filename, NULL, 0, drv, &local_err);
+        ret = bdrv_open(bs1, filename, NULL, BDRV_O_NO_BACKING, drv,
+                        &local_err);
         if (ret < 0) {
             bdrv_unref(bs1);
             goto fail;

@@ -1075,7 +1078,7 @@ int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
 
         bdrv_unref(bs1);
 
-        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
+        ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Could not get temporary filename");
             goto fail;

@@ -1083,8 +1086,7 @@ int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
 
         /* Real path is meaningless for protocols */
         if (path_has_protocol(filename)) {
-            snprintf(backing_filename, sizeof(backing_filename),
-                     "%s", filename);
+            snprintf(backing_filename, PATH_MAX, "%s", filename);
         } else if (!realpath(filename, backing_filename)) {
             ret = -errno;
             error_setg_errno(errp, errno, "Could not resolve path '%s'", filename);

@@ -1206,6 +1208,8 @@ fail:
     if (error_is_set(&local_err)) {
         error_propagate(errp, local_err);
     }
+    g_free(tmp_filename);
+    g_free(backing_filename);
     return ret;
 
 close_and_fail:

@@ -1214,6 +1218,8 @@ close_and_fail:
     if (error_is_set(&local_err)) {
         error_propagate(errp, local_err);
     }
+    g_free(tmp_filename);
+    g_free(backing_filename);
     return ret;
 }

@@ -2271,6 +2277,10 @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors)
 {
+    if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+        return -EIO;
+    }
+
     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                    nb_sectors * BDRV_SECTOR_SIZE);
 }
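The block.c changes repeat one pattern: PATH_MAX-sized buffers move from the stack to the heap, and every early "return 0" becomes "goto free_exit" so a single label releases the buffer on all paths. A minimal sketch of that pattern (resolve_name() is a hypothetical stand-in for the real path resolution):

#include <glib.h>
#include <limits.h>

static int resolve_name(char *buf, size_t len)
{
    g_strlcpy(buf, "backing.qcow2", len); /* stand-in for the real work */
    return 0;
}

static int open_backing_sketch(int already_open)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;

    if (already_open) {
        goto free_exit;             /* was "return 0" with a stack buffer */
    }

    ret = resolve_name(backing_filename, PATH_MAX);

free_exit:
    g_free(backing_filename);       /* single exit frees the heap buffer */
    return ret;
}

int main(void)
{
    return open_backing_sketch(0) | open_backing_sketch(1);
}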
block/bochs.c (109)

@@ -38,57 +38,42 @@
 
 // not allocated: 0xffffffff
 
-// always little-endian
-struct bochs_header_v1 {
-    char magic[32]; // "Bochs Virtual HD Image"
-    char type[16]; // "Redolog"
-    char subtype[16]; // "Undoable" / "Volatile" / "Growing"
-    uint32_t version;
-    uint32_t header; // size of header
-
-    union {
-        struct {
-            uint32_t catalog; // num of entries
-            uint32_t bitmap; // bitmap size
-            uint32_t extent; // extent size
-            uint64_t disk; // disk size
-            char padding[HEADER_SIZE - 64 - 8 - 20];
-        } redolog;
-        char padding[HEADER_SIZE - 64 - 8];
-    } extra;
-};
-
 // always little-endian
 struct bochs_header {
-    char magic[32]; // "Bochs Virtual HD Image"
-    char type[16]; // "Redolog"
-    char subtype[16]; // "Undoable" / "Volatile" / "Growing"
+    char magic[32];       /* "Bochs Virtual HD Image" */
+    char type[16];        /* "Redolog" */
+    char subtype[16];     /* "Undoable" / "Volatile" / "Growing" */
     uint32_t version;
-    uint32_t header; // size of header
+    uint32_t header;      /* size of header */
+
+    uint32_t catalog;     /* num of entries */
+    uint32_t bitmap;      /* bitmap size */
+    uint32_t extent;      /* extent size */
 
     union {
         struct {
-            uint32_t catalog; // num of entries
-            uint32_t bitmap; // bitmap size
-            uint32_t extent; // extent size
-            uint32_t reserved; // for ???
-            uint64_t disk; // disk size
-            char padding[HEADER_SIZE - 64 - 8 - 24];
-        } redolog;
-        char padding[HEADER_SIZE - 64 - 8];
+            uint32_t reserved;      /* for ??? */
+            uint64_t disk;          /* disk size */
+            char padding[HEADER_SIZE - 64 - 20 - 12];
+        } QEMU_PACKED redolog;
+        struct {
+            uint64_t disk;          /* disk size */
+            char padding[HEADER_SIZE - 64 - 20 - 8];
+        } QEMU_PACKED redolog_v1;
+        char padding[HEADER_SIZE - 64 - 20];
     } extra;
-};
+} QEMU_PACKED;
 
 typedef struct BDRVBochsState {
     CoMutex lock;
     uint32_t *catalog_bitmap;
-    int catalog_size;
+    uint32_t catalog_size;
 
-    int data_offset;
+    uint32_t data_offset;
 
-    int bitmap_blocks;
-    int extent_blocks;
-    int extent_size;
+    uint32_t bitmap_blocks;
+    uint32_t extent_blocks;
+    uint32_t extent_size;
 } BDRVBochsState;
 
 static int bochs_probe(const uint8_t *buf, int buf_size, const char *filename)

@@ -112,9 +97,8 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
                       Error **errp)
 {
     BDRVBochsState *s = bs->opaque;
-    int i;
+    uint32_t i;
     struct bochs_header bochs;
-    struct bochs_header_v1 header_v1;
     int ret;
 
     bs->read_only = 1; // no write support yet

@@ -133,13 +117,19 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     if (le32_to_cpu(bochs.version) == HEADER_V1) {
-        memcpy(&header_v1, &bochs, sizeof(bochs));
-        bs->total_sectors = le64_to_cpu(header_v1.extra.redolog.disk) / 512;
+        bs->total_sectors = le64_to_cpu(bochs.extra.redolog_v1.disk) / 512;
     } else {
         bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
     }
 
-    s->catalog_size = le32_to_cpu(bochs.extra.redolog.catalog);
+    /* Limit to 1M entries to avoid unbounded allocation. This is what is
+     * needed for the largest image that bximage can create (~8 TB). */
+    s->catalog_size = le32_to_cpu(bochs.catalog);
+    if (s->catalog_size > 0x100000) {
+        error_setg(errp, "Catalog size is too large");
+        return -EFBIG;
+    }
+
     s->catalog_bitmap = g_malloc(s->catalog_size * 4);
 
     ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,

@@ -153,10 +143,24 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
 
     s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4);
 
-    s->bitmap_blocks = 1 + (le32_to_cpu(bochs.extra.redolog.bitmap) - 1) / 512;
-    s->extent_blocks = 1 + (le32_to_cpu(bochs.extra.redolog.extent) - 1) / 512;
+    s->bitmap_blocks = 1 + (le32_to_cpu(bochs.bitmap) - 1) / 512;
+    s->extent_blocks = 1 + (le32_to_cpu(bochs.extent) - 1) / 512;
 
-    s->extent_size = le32_to_cpu(bochs.extra.redolog.extent);
+    s->extent_size = le32_to_cpu(bochs.extent);
+    if (s->extent_size == 0) {
+        error_setg(errp, "Extent size may not be zero");
+        return -EINVAL;
+    } else if (s->extent_size > 0x800000) {
+        error_setg(errp, "Extent size %" PRIu32 " is too large",
+                   s->extent_size);
+        return -EINVAL;
+    }
+
+    if (s->catalog_size < bs->total_sectors / s->extent_size) {
+        error_setg(errp, "Catalog size is too small for this disk size");
+        ret = -EINVAL;
+        goto fail;
+    }
 
     qemu_co_mutex_init(&s->lock);
     return 0;

@@ -169,8 +173,8 @@ fail:
 static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
 {
     BDRVBochsState *s = bs->opaque;
-    int64_t offset = sector_num * 512;
-    int64_t extent_index, extent_offset, bitmap_offset;
+    uint64_t offset = sector_num * 512;
+    uint64_t extent_index, extent_offset, bitmap_offset;
     char bitmap_entry;
 
     // seek to sector

@@ -181,8 +185,9 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
         return -1; /* not allocated */
     }
 
-    bitmap_offset = s->data_offset + (512 * s->catalog_bitmap[extent_index] *
-        (s->extent_blocks + s->bitmap_blocks));
+    bitmap_offset = s->data_offset +
+        (512 * (uint64_t) s->catalog_bitmap[extent_index] *
+        (s->extent_blocks + s->bitmap_blocks));
 
     /* read in bitmap for current extent */
     if (bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8),
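A sketch double-checking the padding arithmetic of the unified header above, under the assumptions that QEMU_PACKED expands to __attribute__((packed)) and HEADER_SIZE is 512: the fields before the union occupy 64 + 20 bytes, so each union arm pads itself back to one full header.

#include <stdint.h>

#define HEADER_SIZE 512

struct bochs_header_sketch {
    char magic[32], type[16], subtype[16];   /* 64 bytes of strings */
    uint32_t version, header;                /* +8 */
    uint32_t catalog, bitmap, extent;        /* +12, shared by all versions */
    union {
        struct {
            uint32_t reserved;
            uint64_t disk;
            char padding[HEADER_SIZE - 64 - 20 - 12];  /* 12 = 4 + 8 above */
        } __attribute__((packed)) redolog;
        struct {
            uint64_t disk;
            char padding[HEADER_SIZE - 64 - 20 - 8];   /* 8 = disk alone */
        } __attribute__((packed)) redolog_v1;
        char padding[HEADER_SIZE - 64 - 20];
    } extra;
} __attribute__((packed));

/* packed, so the struct is exactly one 512-byte header on disk */
_Static_assert(sizeof(struct bochs_header_sketch) == HEADER_SIZE,
               "header must be exactly HEADER_SIZE");

int main(void)
{
    return 0;
}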
block/cloop.c

@@ -26,6 +26,9 @@
 #include "qemu/module.h"
 #include <zlib.h>
 
+/* Maximum compressed block size */
+#define MAX_BLOCK_SIZE (64 * 1024 * 1024)
+
 typedef struct BDRVCloopState {
     CoMutex lock;
     uint32_t block_size;

@@ -68,6 +71,26 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
         return ret;
     }
     s->block_size = be32_to_cpu(s->block_size);
+    if (s->block_size % 512) {
+        error_setg(errp, "block_size %u must be a multiple of 512",
+                   s->block_size);
+        return -EINVAL;
+    }
+    if (s->block_size == 0) {
+        error_setg(errp, "block_size cannot be zero");
+        return -EINVAL;
+    }
+
+    /* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
+     * we can accept more. Prevent ridiculous values like 4 GB - 1 since we
+     * need a buffer this big.
+     */
+    if (s->block_size > MAX_BLOCK_SIZE) {
+        error_setg(errp, "block_size %u must be %u MB or less",
+                   s->block_size,
+                   MAX_BLOCK_SIZE / (1024 * 1024));
+        return -EINVAL;
+    }
 
     ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
     if (ret < 0) {

@@ -76,7 +99,23 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
     s->n_blocks = be32_to_cpu(s->n_blocks);
 
     /* read offsets */
-    offsets_size = s->n_blocks * sizeof(uint64_t);
+    if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
+        /* Prevent integer overflow */
+        error_setg(errp, "n_blocks %u must be %zu or less",
+                   s->n_blocks,
+                   (UINT32_MAX - 1) / sizeof(uint64_t));
+        return -EINVAL;
+    }
+    offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
+    if (offsets_size > 512 * 1024 * 1024) {
+        /* Prevent ridiculous offsets_size which causes memory allocation to
+         * fail or overflows bdrv_pread() size. In practice the 512 MB
+         * offsets[] limit supports 16 TB images at 256 KB block size.
+         */
+        error_setg(errp, "image requires too many offsets, "
+                   "try increasing block size");
+        return -EINVAL;
+    }
     s->offsets = g_malloc(offsets_size);
 
     ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);

@@ -84,13 +123,37 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
         goto fail;
     }
 
-    for(i=0;i<s->n_blocks;i++) {
+    for (i = 0; i < s->n_blocks + 1; i++) {
+        uint64_t size;
+
         s->offsets[i] = be64_to_cpu(s->offsets[i]);
-        if (i > 0) {
-            uint32_t size = s->offsets[i] - s->offsets[i - 1];
-            if (size > max_compressed_block_size) {
-                max_compressed_block_size = size;
-            }
+        if (i == 0) {
+            continue;
+        }
+
+        if (s->offsets[i] < s->offsets[i - 1]) {
+            error_setg(errp, "offsets not monotonically increasing at "
+                       "index %u, image file is corrupt", i);
+            ret = -EINVAL;
+            goto fail;
+        }
+
+        size = s->offsets[i] - s->offsets[i - 1];
+
+        /* Compressed blocks should be smaller than the uncompressed block size
+         * but maybe compression performed poorly so the compressed block is
+         * actually bigger. Clamp down on unrealistic values to prevent
+         * ridiculous s->compressed_block allocation.
+         */
+        if (size > 2 * MAX_BLOCK_SIZE) {
+            error_setg(errp, "invalid compressed block size at index %u, "
+                       "image file is corrupt", i);
+            ret = -EINVAL;
+            goto fail;
+        }
+
+        if (size > max_compressed_block_size) {
+            max_compressed_block_size = size;
+        }
     }

@@ -180,9 +243,7 @@ static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num,
 static void cloop_close(BlockDriverState *bs)
 {
     BDRVCloopState *s = bs->opaque;
-    if (s->n_blocks > 0) {
-        g_free(s->offsets);
-    }
+    g_free(s->offsets);
     g_free(s->compressed_block);
     g_free(s->uncompressed_block);
     inflateEnd(&s->zstream);
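The n_blocks guard above exists because (n_blocks + 1) * sizeof(uint64_t) would otherwise wrap in 32-bit arithmetic; a worked example of the wraparound it prevents (standalone sketch):

#include <stdint.h>
#include <stdio.h>

static int offsets_size_checked(uint32_t n_blocks, uint32_t *out)
{
    if (n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
        return -1;                        /* would overflow uint32_t */
    }
    *out = (n_blocks + 1) * sizeof(uint64_t);
    return 0;
}

int main(void)
{
    uint32_t size = 0;
    /* unchecked, 0xFFFFFFFF blocks would wrap: (0xFFFFFFFF + 1) == 0 in
     * 32-bit arithmetic, giving a zero-byte allocation for a huge table */
    printf("huge:  %d\n", offsets_size_checked(UINT32_MAX, &size));
    printf("small: %d (size=%u)\n", offsets_size_checked(4, &size), size);
    return 0;
}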
block/curl.c (86)

@@ -34,6 +34,11 @@
 #define DPRINTF(fmt, ...) do { } while (0)
 #endif
 
+#if LIBCURL_VERSION_NUM >= 0x071000
+/* The multi interface timer callback was introduced in 7.16.0 */
+#define NEED_CURL_TIMER_CALLBACK
+#endif
+
 #define PROTOCOLS (CURLPROTO_HTTP | CURLPROTO_HTTPS | \
                    CURLPROTO_FTP | CURLPROTO_FTPS | \
                    CURLPROTO_TFTP)

@@ -77,6 +82,7 @@ typedef struct CURLState
 
 typedef struct BDRVCURLState {
     CURLM *multi;
+    QEMUTimer timer;
     size_t len;
     CURLState states[CURL_NUM_STATES];
     char *url;

@@ -87,6 +93,23 @@ typedef struct BDRVCURLState {
 static void curl_clean_state(CURLState *s);
 static void curl_multi_do(void *arg);
 
+#ifdef NEED_CURL_TIMER_CALLBACK
+static int curl_timer_cb(CURLM *multi, long timeout_ms, void *opaque)
+{
+    BDRVCURLState *s = opaque;
+
+    DPRINTF("CURL: timer callback timeout_ms %ld\n", timeout_ms);
+    if (timeout_ms == -1) {
+        timer_del(&s->timer);
+    } else {
+        int64_t timeout_ns = (int64_t)timeout_ms * 1000 * 1000;
+        timer_mod(&s->timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ns);
+    }
+    return 0;
+}
+#endif
+
 static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
                         void *s, void *sp)
 {

@@ -134,6 +157,11 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
     if (!s || !s->orig_buf)
         goto read_end;
 
+    if (s->buf_off >= s->buf_len) {
+        /* buffer full, read nothing */
+        return 0;
+    }
+    realsize = MIN(realsize, s->buf_len - s->buf_off);
     memcpy(s->orig_buf + s->buf_off, ptr, realsize);
     s->buf_off += realsize;

@@ -209,20 +237,10 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     return FIND_RET_NONE;
 }
 
-static void curl_multi_do(void *arg)
+static void curl_multi_read(BDRVCURLState *s)
 {
-    BDRVCURLState *s = (BDRVCURLState *)arg;
-    int running;
-    int r;
     int msgs_in_queue;
 
-    if (!s->multi)
-        return;
-
-    do {
-        r = curl_multi_socket_all(s->multi, &running);
-    } while(r == CURLM_CALL_MULTI_PERFORM);
-
     /* Try to find done transfers, so we can free the easy
      * handle again. */
     do {

@@ -266,6 +284,41 @@ static void curl_multi_do(void *arg)
     } while(msgs_in_queue);
 }
 
+static void curl_multi_do(void *arg)
+{
+    BDRVCURLState *s = (BDRVCURLState *)arg;
+    int running;
+    int r;
+
+    if (!s->multi) {
+        return;
+    }
+
+    do {
+        r = curl_multi_socket_all(s->multi, &running);
+    } while(r == CURLM_CALL_MULTI_PERFORM);
+
+    curl_multi_read(s);
+}
+
+static void curl_multi_timeout_do(void *arg)
+{
+#ifdef NEED_CURL_TIMER_CALLBACK
+    BDRVCURLState *s = (BDRVCURLState *)arg;
+    int running;
+
+    if (!s->multi) {
+        return;
+    }
+
+    curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
+
+    curl_multi_read(s);
+#else
+    abort();
+#endif
+}
+
 static CURLState *curl_init_state(BDRVCURLState *s)
 {
     CURLState *state = NULL;

@@ -473,12 +526,20 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     curl_easy_cleanup(state->curl);
     state->curl = NULL;
 
+    aio_timer_init(bdrv_get_aio_context(bs), &s->timer,
+                   QEMU_CLOCK_REALTIME, SCALE_NS,
+                   curl_multi_timeout_do, s);
+
     // Now we know the file exists and its size, so let's
     // initialize the multi interface!
 
     s->multi = curl_multi_init();
     curl_multi_setopt(s->multi, CURLMOPT_SOCKETDATA, s);
     curl_multi_setopt(s->multi, CURLMOPT_SOCKETFUNCTION, curl_sock_cb);
+#ifdef NEED_CURL_TIMER_CALLBACK
+    curl_multi_setopt(s->multi, CURLMOPT_TIMERDATA, s);
+    curl_multi_setopt(s->multi, CURLMOPT_TIMERFUNCTION, curl_timer_cb);
+#endif
     curl_multi_do(s);
 
     qemu_opts_del(opts);

@@ -597,6 +658,9 @@ static void curl_close(BlockDriverState *bs)
     }
     if (s->multi)
         curl_multi_cleanup(s->multi);
+
+    timer_del(&s->timer);
+
     g_free(s->url);
 }
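A sketch of the libcurl timer-callback contract the curl driver now implements (assumes libcurl >= 7.16.0; my_timer_arm_ms/my_timer_disarm are hypothetical stand-ins for the QEMUTimer calls): curl reports -1 to cancel the pending timeout, otherwise the number of milliseconds after which curl_multi_socket_action(..., CURL_SOCKET_TIMEOUT, ...) should be invoked.

#include <curl/curl.h>
#include <stdio.h>

static void my_timer_disarm(void)
{
    printf("timer cancelled\n");
}

static void my_timer_arm_ms(long ms)
{
    printf("timer armed for %ld ms\n", ms);
}

static int timer_cb(CURLM *multi, long timeout_ms, void *opaque)
{
    (void)multi;
    (void)opaque;
    if (timeout_ms == -1) {
        my_timer_disarm();          /* curl withdrew its timeout request */
    } else {
        /* when the timer fires, run curl_multi_socket_action(multi,
         * CURL_SOCKET_TIMEOUT, 0, &running) and drain finished messages */
        my_timer_arm_ms(timeout_ms);
    }
    return 0;
}

int main(void)
{
    CURLM *multi = curl_multi_init();
    curl_multi_setopt(multi, CURLMOPT_TIMERDATA, NULL);
    curl_multi_setopt(multi, CURLMOPT_TIMERFUNCTION, timer_cb);
    curl_multi_cleanup(multi);
    return 0;
}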
block/dmg.c (269)

@@ -27,6 +27,14 @@
 #include "qemu/module.h"
 #include <zlib.h>
 
+enum {
+    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
+     * or truncating when converting to 32-bit types
+     */
+    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
+    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
+};
+
 typedef struct BDRVDMGState {
     CoMutex lock;
     /* each chunk contains a certain number of sectors,

@@ -92,13 +100,44 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
     return 0;
 }
 
+/* Increase max chunk sizes, if necessary. This function is used to calculate
+ * the buffer sizes needed for compressed/uncompressed chunk I/O.
+ */
+static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
+                                  uint32_t *max_compressed_size,
+                                  uint32_t *max_sectors_per_chunk)
+{
+    uint32_t compressed_size = 0;
+    uint32_t uncompressed_sectors = 0;
+
+    switch (s->types[chunk]) {
+    case 0x80000005: /* zlib compressed */
+        compressed_size = s->lengths[chunk];
+        uncompressed_sectors = s->sectorcounts[chunk];
+        break;
+    case 1: /* copy */
+        uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
+        break;
+    case 2: /* zero */
+        uncompressed_sectors = s->sectorcounts[chunk];
+        break;
+    }
+
+    if (compressed_size > *max_compressed_size) {
+        *max_compressed_size = compressed_size;
+    }
+    if (uncompressed_sectors > *max_sectors_per_chunk) {
+        *max_sectors_per_chunk = uncompressed_sectors;
+    }
+}
+
 static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
 {
     BDRVDMGState *s = bs->opaque;
-    uint64_t info_begin,info_end,last_in_offset,last_out_offset;
+    uint64_t info_begin, info_end, last_in_offset, last_out_offset;
     uint32_t count, tmp;
-    uint32_t max_compressed_size=1,max_sectors_per_chunk=1,i;
+    uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
     int64_t offset;
     int ret;

@@ -160,37 +199,40 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
             goto fail;
         }
 
         if (type == 0x6d697368 && count >= 244) {
-            int new_size, chunk_count;
+            size_t new_size;
+            uint32_t chunk_count;
 
             offset += 4;
             offset += 200;
 
-            chunk_count = (count-204)/40;
-            new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
-            s->types = g_realloc(s->types, new_size/2);
-            s->offsets = g_realloc(s->offsets, new_size);
-            s->lengths = g_realloc(s->lengths, new_size);
-            s->sectors = g_realloc(s->sectors, new_size);
-            s->sectorcounts = g_realloc(s->sectorcounts, new_size);
+            chunk_count = (count - 204) / 40;
+            new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
+            s->types = g_realloc(s->types, new_size / 2);
+            s->offsets = g_realloc(s->offsets, new_size);
+            s->lengths = g_realloc(s->lengths, new_size);
+            s->sectors = g_realloc(s->sectors, new_size);
+            s->sectorcounts = g_realloc(s->sectorcounts, new_size);
 
             for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
                 ret = read_uint32(bs, offset, &s->types[i]);
                 if (ret < 0) {
                     goto fail;
                 }
-                offset += 4;
-                if(s->types[i]!=0x80000005 && s->types[i]!=1 && s->types[i]!=2) {
-                    if(s->types[i]==0xffffffff) {
-                        last_in_offset = s->offsets[i-1]+s->lengths[i-1];
-                        last_out_offset = s->sectors[i-1]+s->sectorcounts[i-1];
-                    }
-                    chunk_count--;
-                    i--;
-                    offset += 36;
-                    continue;
-                }
-                offset += 4;
+                offset += 4;
+
+                if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
+                    s->types[i] != 2) {
+                    if (s->types[i] == 0xffffffff && i > 0) {
+                        last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
+                        last_out_offset = s->sectors[i - 1] +
+                                          s->sectorcounts[i - 1];
+                    }
+                    chunk_count--;
+                    i--;
+                    offset += 36;
+                    continue;
+                }
+                offset += 4;
 
                 ret = read_uint64(bs, offset, &s->sectors[i]);
                 if (ret < 0) {

@@ -205,6 +247,14 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                 }
                 offset += 8;
 
+                if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
+                    error_report("sector count %" PRIu64 " for chunk %u is "
+                                 "larger than max (%u)",
+                                 s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
+                    ret = -EINVAL;
+                    goto fail;
+                }
+
                 ret = read_uint64(bs, offset, &s->offsets[i]);
                 if (ret < 0) {
                     goto fail;

@@ -218,19 +268,25 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                 }
                 offset += 8;
 
-                if(s->lengths[i]>max_compressed_size)
-                    max_compressed_size = s->lengths[i];
-                if(s->sectorcounts[i]>max_sectors_per_chunk)
-                    max_sectors_per_chunk = s->sectorcounts[i];
-            }
-            s->n_chunks+=chunk_count;
-        }
+                if (s->lengths[i] > DMG_LENGTHS_MAX) {
+                    error_report("length %" PRIu64 " for chunk %u is larger "
+                                 "than max (%u)",
+                                 s->lengths[i], i, DMG_LENGTHS_MAX);
+                    ret = -EINVAL;
+                    goto fail;
+                }
+
+                update_max_chunk_size(s, i, &max_compressed_size,
+                                      &max_sectors_per_chunk);
+            }
+            s->n_chunks += chunk_count;
+        }
     }
 
     /* initialize zlib engine */
-    s->compressed_chunk = g_malloc(max_compressed_size+1);
-    s->uncompressed_chunk = g_malloc(512*max_sectors_per_chunk);
-    if(inflateInit(&s->zstream) != Z_OK) {
+    s->compressed_chunk = g_malloc(max_compressed_size + 1);
+    s->uncompressed_chunk = g_malloc(512 * max_sectors_per_chunk);
+    if (inflateInit(&s->zstream) != Z_OK) {
         ret = -EINVAL;
         goto fail;
     }

@@ -252,83 +308,82 @@ fail:
 }
 
 static inline int is_sector_in_chunk(BDRVDMGState* s,
-        uint32_t chunk_num,int sector_num)
+                                     uint32_t chunk_num, uint64_t sector_num)
 {
-    if(chunk_num>=s->n_chunks || s->sectors[chunk_num]>sector_num ||
-            s->sectors[chunk_num]+s->sectorcounts[chunk_num]<=sector_num)
-        return 0;
-    else
-        return -1;
+    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
+        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
+        return 0;
+    } else {
+        return -1;
+    }
 }
 
-static inline uint32_t search_chunk(BDRVDMGState* s,int sector_num)
+static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
 {
     /* binary search */
-    uint32_t chunk1=0,chunk2=s->n_chunks,chunk3;
-    while(chunk1!=chunk2) {
-        chunk3 = (chunk1+chunk2)/2;
-        if(s->sectors[chunk3]>sector_num)
-            chunk2 = chunk3;
-        else if(s->sectors[chunk3]+s->sectorcounts[chunk3]>sector_num)
-            return chunk3;
-        else
-            chunk1 = chunk3;
+    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
+    while (chunk1 != chunk2) {
+        chunk3 = (chunk1 + chunk2) / 2;
+        if (s->sectors[chunk3] > sector_num) {
+            chunk2 = chunk3;
+        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
+            return chunk3;
+        } else {
+            chunk1 = chunk3;
+        }
     }
     return s->n_chunks; /* error */
 }
 
-static inline int dmg_read_chunk(BlockDriverState *bs, int sector_num)
+static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
 {
     BDRVDMGState *s = bs->opaque;
 
-    if(!is_sector_in_chunk(s,s->current_chunk,sector_num)) {
-        int ret;
-        uint32_t chunk = search_chunk(s,sector_num);
+    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
+        int ret;
+        uint32_t chunk = search_chunk(s, sector_num);
 
-        if(chunk>=s->n_chunks)
-            return -1;
+        if (chunk >= s->n_chunks) {
+            return -1;
+        }
 
         s->current_chunk = s->n_chunks;
-        switch(s->types[chunk]) {
-        case 0x80000005: { /* zlib compressed */
-            int i;
-
-            /* we need to buffer, because only the chunk as whole can be
-             * inflated. */
-            i=0;
-            do {
-                ret = bdrv_pread(bs->file, s->offsets[chunk] + i,
-                                 s->compressed_chunk+i, s->lengths[chunk]-i);
-                if(ret<0 && errno==EINTR)
-                    ret=0;
-                i+=ret;
-            } while(ret>=0 && ret+i<s->lengths[chunk]);
-
-            if (ret != s->lengths[chunk])
-                return -1;
+        switch (s->types[chunk]) {
+        case 0x80000005: { /* zlib compressed */
+            /* we need to buffer, because only the chunk as whole can be
+             * inflated. */
+            ret = bdrv_pread(bs->file, s->offsets[chunk],
+                             s->compressed_chunk, s->lengths[chunk]);
+            if (ret != s->lengths[chunk]) {
+                return -1;
+            }
 
             s->zstream.next_in = s->compressed_chunk;
             s->zstream.avail_in = s->lengths[chunk];
             s->zstream.next_out = s->uncompressed_chunk;
-            s->zstream.avail_out = 512*s->sectorcounts[chunk];
+            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
             ret = inflateReset(&s->zstream);
-            if(ret != Z_OK)
-                return -1;
+            if (ret != Z_OK) {
+                return -1;
+            }
             ret = inflate(&s->zstream, Z_FINISH);
-            if(ret != Z_STREAM_END || s->zstream.total_out != 512*s->sectorcounts[chunk])
-                return -1;
+            if (ret != Z_STREAM_END ||
+                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
+                return -1;
+            }
             break; }
         case 1: /* copy */
             ret = bdrv_pread(bs->file, s->offsets[chunk],
                              s->uncompressed_chunk, s->lengths[chunk]);
-            if (ret != s->lengths[chunk])
-                return -1;
+            if (ret != s->lengths[chunk]) {
+                return -1;
+            }
             break;
         case 2: /* zero */
-            memset(s->uncompressed_chunk, 0, 512*s->sectorcounts[chunk]);
+            memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
             break;
         }
         s->current_chunk = chunk;
     }
     return 0;
 }

@@ -339,12 +394,14 @@ static int dmg_read(BlockDriverState *bs, int64_t sector_num,
     BDRVDMGState *s = bs->opaque;
     int i;
 
-    for(i=0;i<nb_sectors;i++) {
-        uint32_t sector_offset_in_chunk;
-        if(dmg_read_chunk(bs, sector_num+i) != 0)
-            return -1;
-        sector_offset_in_chunk = sector_num+i-s->sectors[s->current_chunk];
-        memcpy(buf+i*512,s->uncompressed_chunk+sector_offset_in_chunk*512,512);
+    for (i = 0; i < nb_sectors; i++) {
+        uint32_t sector_offset_in_chunk;
+        if (dmg_read_chunk(bs, sector_num + i) != 0) {
+            return -1;
+        }
+        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
+        memcpy(buf + i * 512,
+               s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
     }
     return 0;
 }

@@ -376,12 +433,12 @@ static void dmg_close(BlockDriverState *bs)
 }
 
 static BlockDriver bdrv_dmg = {
-    .format_name   = "dmg",
-    .instance_size = sizeof(BDRVDMGState),
-    .bdrv_probe    = dmg_probe,
-    .bdrv_open     = dmg_open,
-    .bdrv_read     = dmg_co_read,
-    .bdrv_close    = dmg_close,
+    .format_name    = "dmg",
+    .instance_size  = sizeof(BDRVDMGState),
+    .bdrv_probe     = dmg_probe,
+    .bdrv_open      = dmg_open,
+    .bdrv_read      = dmg_co_read,
+    .bdrv_close     = dmg_close,
 };
 
 static void bdrv_dmg_init(void)
block/iscsi.c

@@ -65,6 +65,7 @@ typedef struct IscsiTask {
     int do_retry;
     struct scsi_task *task;
     Coroutine *co;
+    QEMUBH *bh;
 } IscsiTask;
 
 typedef struct IscsiAIOCB {

@@ -121,6 +122,13 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
     qemu_bh_schedule(acb->bh);
 }
 
+static void iscsi_co_generic_bh_cb(void *opaque)
+{
+    struct IscsiTask *iTask = opaque;
+    qemu_bh_delete(iTask->bh);
+    qemu_coroutine_enter(iTask->co, NULL);
+}
+
 static void
 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                     void *command_data, void *opaque)

@@ -135,17 +143,19 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
 
     if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
         && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
+        error_report("iSCSI CheckCondition: %s", iscsi_get_error(iscsi));
         iTask->do_retry = 1;
         goto out;
     }
 
     if (status != SCSI_STATUS_GOOD) {
-        error_report("iSCSI: Failure. %s", iscsi_get_error(iscsi));
+        error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
     }
 
 out:
     if (iTask->co) {
-        qemu_coroutine_enter(iTask->co, NULL);
+        iTask->bh = qemu_bh_new(iscsi_co_generic_bh_cb, iTask);
+        qemu_bh_schedule(iTask->bh);
     }
 }

@@ -859,6 +869,7 @@ retry:
         scsi_free_scsi_task(iTask.task);
         iTask.task = NULL;
     }
+    iTask.complete = 0;
     goto retry;
 }

@@ -955,6 +966,7 @@ retry:
     }
 
     if (iTask.do_retry) {
+        iTask.complete = 0;
         goto retry;
     }
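The iscsi change stops re-entering the coroutine directly from the libiscsi completion callback and instead schedules a bottom half that performs the entry from the event loop. A stand-in sketch of that deferral pattern (a one-slot queue instead of QEMU's BH machinery, which is an assumption for illustration only):

#include <stdio.h>

typedef void deferred_fn(void *opaque);

static deferred_fn *pending_fn;     /* one-slot "bottom half" queue */
static void *pending_opaque;

static void schedule_bh(deferred_fn *fn, void *opaque)
{
    pending_fn = fn;                /* queued, not run: callback returns */
    pending_opaque = opaque;
}

static void enter_coroutine(void *opaque)
{
    printf("coroutine resumed with %s from a flat stack\n", (char *)opaque);
}

static void library_completion_cb(void *opaque)
{
    /* was: enter_coroutine(opaque) directly, deep inside the library */
    schedule_bh(enter_coroutine, opaque);
}

int main(void)
{
    library_completion_cb("task");
    /* an event-loop iteration later runs the deferred work */
    if (pending_fn) {
        pending_fn(pending_opaque);
        pending_fn = NULL;
    }
    return 0;
}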
block/mirror.c

@@ -95,7 +95,14 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
     }
 
     g_slice_free(MirrorOp, op);
-    qemu_coroutine_enter(s->common.co, NULL);
+
+    /* Enter coroutine when it is not sleeping. The coroutine sleeps to
+     * rate-limit itself. The coroutine will eventually resume since there is
+     * a sleep timeout so don't wake it early.
+     */
+    if (s->common.busy) {
+        qemu_coroutine_enter(s->common.co, NULL);
+    }
 }
 
 static void mirror_write_complete(void *opaque, int ret)

@@ -136,11 +143,12 @@ static void mirror_read_complete(void *opaque, int ret)
                     mirror_write_complete, op);
 }
 
-static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
+static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
 {
     BlockDriverState *source = s->common.bs;
     int nb_sectors, sectors_per_chunk, nb_chunks;
     int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
+    uint64_t delay_ns;
     MirrorOp *op;
 
     s->sector_num = hbitmap_iter_next(&s->hbi);

@@ -227,7 +235,12 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
         nb_chunks += added_chunks;
         next_sector += added_sectors;
         next_chunk += added_chunks;
-    } while (next_sector < end);
+        if (!s->synced && s->common.speed) {
+            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
+        } else {
+            delay_ns = 0;
+        }
+    } while (delay_ns == 0 && next_sector < end);
 
     /* Allocate a MirrorOp that is used as an AIO callback. */
     op = g_slice_new(MirrorOp);

@@ -263,6 +276,7 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
     trace_mirror_one_iteration(s, sector_num, nb_sectors);
     bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                    mirror_read_complete, op);
+    return delay_ns;
 }
 
 static void mirror_free_init(MirrorBlockJob *s)

@@ -358,7 +372,7 @@ static void coroutine_fn mirror_run(void *opaque)
     bdrv_dirty_iter_init(bs, &s->hbi);
     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     for (;;) {
-        uint64_t delay_ns;
+        uint64_t delay_ns = 0;
         int64_t cnt;
         bool should_complete;

@@ -382,8 +396,10 @@ static void coroutine_fn mirror_run(void *opaque)
             qemu_coroutine_yield();
             continue;
         } else if (cnt != 0) {
-            mirror_iteration(s);
-            continue;
+            delay_ns = mirror_iteration(s);
+            if (delay_ns == 0) {
+                continue;
+            }
         }
     }

@@ -428,17 +444,10 @@ static void coroutine_fn mirror_run(void *opaque)
         }
 
         ret = 0;
-        trace_mirror_before_sleep(s, cnt, s->synced);
+        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
         if (!s->synced) {
             /* Publish progress */
             s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
 
-            if (s->common.speed) {
-                delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
-            } else {
-                delay_ns = 0;
-            }
-
             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
             if (block_job_is_cancelled(&s->common)) {
                 break;
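In the mirror rework, mirror_iteration() computes the rate-limit delay from the sectors it actually submitted and returns it, so mirror_run() sleeps for exactly that long instead of charging a fixed chunk per loop. A simplified sketch of that accounting (not QEMU's RateLimit; the 100 ms slice model is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t limit_per_slice;   /* allowed units per 100 ms slice */
    uint64_t dispatched;        /* used up in the current slice */
} RateLimitSketch;

static uint64_t ratelimit_delay(RateLimitSketch *rl, uint64_t n)
{
    rl->dispatched += n;
    if (rl->dispatched <= rl->limit_per_slice) {
        return 0;                       /* under budget: no sleep */
    }
    rl->dispatched = 0;                 /* new slice starts after the sleep */
    return 100ull * 1000 * 1000;        /* sleep out the 100 ms slice, in ns */
}

static uint64_t iteration(RateLimitSketch *rl, uint64_t sectors)
{
    /* ... submit I/O for `sectors` here ... */
    return ratelimit_delay(rl, sectors);    /* caller decides how to sleep */
}

int main(void)
{
    RateLimitSketch rl = { .limit_per_slice = 1024 };
    printf("delay: %llu\n", (unsigned long long)iteration(&rl, 512));
    printf("delay: %llu\n", (unsigned long long)iteration(&rl, 1024));
    return 0;
}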
block/parallels.c

@@ -49,9 +49,9 @@ typedef struct BDRVParallelsState {
     CoMutex lock;
 
     uint32_t *catalog_bitmap;
-    int catalog_size;
+    unsigned int catalog_size;
 
-    int tracks;
+    unsigned int tracks;
 } BDRVParallelsState;
 
 static int parallels_probe(const uint8_t *buf, int buf_size, const char *filename)

@@ -92,8 +92,18 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
     bs->total_sectors = le32_to_cpu(ph.nb_sectors);
 
     s->tracks = le32_to_cpu(ph.tracks);
+    if (s->tracks == 0) {
+        error_setg(errp, "Invalid image: Zero sectors per track");
+        ret = -EINVAL;
+        goto fail;
+    }
+
     s->catalog_size = le32_to_cpu(ph.catalog_entries);
+    if (s->catalog_size > INT_MAX / 4) {
+        error_setg(errp, "Catalog too large");
+        ret = -EFBIG;
+        goto fail;
+    }
     s->catalog_bitmap = g_malloc(s->catalog_size * 4);
 
     ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
block/qapi.c

@@ -471,6 +471,7 @@ static void dump_qobject(fprintf_function func_fprintf, void *f,
     case QTYPE_QERROR: {
         QString *value = qerror_human((QError *)obj);
         func_fprintf(f, "%s", qstring_get_str(value));
+        QDECREF(value);
         break;
     }
     case QTYPE_NONE:
block/qcow.c (43)

@@ -48,9 +48,10 @@ typedef struct QCowHeader {
     uint64_t size; /* in bytes */
     uint8_t cluster_bits;
     uint8_t l2_bits;
+    uint16_t padding;
     uint32_t crypt_method;
     uint64_t l1_table_offset;
-} QCowHeader;
+} QEMU_PACKED QCowHeader;
 
 #define L2_CACHE_SIZE 16

@@ -60,7 +61,7 @@ typedef struct BDRVQcowState {
     int cluster_sectors;
     int l2_bits;
     int l2_size;
-    int l1_size;
+    unsigned int l1_size;
     uint64_t cluster_offset_mask;
     uint64_t l1_table_offset;
     uint64_t *l1_table;

@@ -96,7 +97,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
 {
     BDRVQcowState *s = bs->opaque;
-    int len, i, shift, ret;
+    unsigned int len, i, shift;
+    int ret;
     QCowHeader header;
 
     ret = bdrv_pread(bs->file, 0, &header, sizeof(header));

@@ -125,10 +127,25 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
         goto fail;
     }
 
-    if (header.size <= 1 || header.cluster_bits < 9) {
+    if (header.size <= 1) {
+        error_setg(errp, "Image size is too small (must be at least 2 bytes)");
+        ret = -EINVAL;
+        goto fail;
+    }
+    if (header.cluster_bits < 9 || header.cluster_bits > 16) {
+        error_setg(errp, "Cluster size must be between 512 and 64k");
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    /* l2_bits specifies number of entries; storing a uint64_t in each entry,
+     * so bytes = num_entries << 3. */
+    if (header.l2_bits < 9 - 3 || header.l2_bits > 16 - 3) {
+        error_setg(errp, "L2 table size must be between 512 and 64k");
         ret = -EINVAL;
         goto fail;
     }
+
     if (header.crypt_method > QCOW_CRYPT_AES) {
         ret = -EINVAL;
         goto fail;

@@ -147,7 +164,19 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
 
     /* read the level 1 table */
     shift = s->cluster_bits + s->l2_bits;
-    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;
+    if (header.size > UINT64_MAX - (1LL << shift)) {
+        error_setg(errp, "Image too large");
+        ret = -EINVAL;
+        goto fail;
+    } else {
+        uint64_t l1_size = (header.size + (1LL << shift) - 1) >> shift;
+        if (l1_size > INT_MAX / sizeof(uint64_t)) {
+            error_setg(errp, "Image too large");
+            ret = -EINVAL;
+            goto fail;
+        }
+        s->l1_size = l1_size;
+    }
 
     s->l1_table_offset = header.l1_table_offset;
     s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t));

@@ -171,7 +200,9 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
     if (header.backing_file_offset != 0) {
         len = header.backing_file_size;
         if (len > 1023) {
-            len = 1023;
+            error_setg(errp, "Backing file name too long");
+            ret = -EINVAL;
+            goto fail;
         }
         ret = bdrv_pread(bs->file, header.backing_file_offset,
                          bs->backing_file, len);
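A worked check of the new qcow header bounds (standalone sketch): cluster_bits 9..16 keeps clusters between 512 bytes and 64 KB, and because every L2 entry is a uint64_t (bytes = num_entries << 3, as the patch comment says), l2_bits 9-3 .. 16-3 keeps the L2 table itself in the same 512 B .. 64 KB window.

#include <stdio.h>

int main(void)
{
    /* endpoints of the accepted ranges */
    for (int cluster_bits = 9; cluster_bits <= 16; cluster_bits += 7) {
        printf("cluster_bits %2d -> cluster size %6u bytes\n",
               cluster_bits, 1u << cluster_bits);
    }
    for (int l2_bits = 9 - 3; l2_bits <= 16 - 3; l2_bits += 7) {
        /* table bytes = entries << 3, since each entry is 8 bytes */
        printf("l2_bits %2d -> %4u entries -> L2 table %6u bytes\n",
               l2_bits, 1u << l2_bits, (1u << l2_bits) << 3);
    }
    return 0;
}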
block/qcow2-cluster.c

@@ -55,7 +55,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
         }
     }
 
-    if (new_l1_size > INT_MAX) {
+    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
         return -EFBIG;
     }

@@ -359,15 +359,6 @@ static int coroutine_fn copy_sectors(BlockDriverState *bs,
     struct iovec iov;
     int n, ret;
 
-    /*
-     * If this is the last cluster and it is only partially used, we must only
-     * copy until the end of the image, or bdrv_check_request will fail for the
-     * bdrv_read/write calls below.
-     */
-    if (start_sect + n_end > bs->total_sectors) {
-        n_end = bs->total_sectors - start_sect;
-    }
-
     n = n_end - n_start;
     if (n <= 0) {
         return 0;
block/qcow2-refcount.c

@@ -28,7 +28,7 @@
 #include "qemu/range.h"
 #include "qapi/qmp/types.h"
 
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
 static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                     int64_t offset, int64_t length,
                     int addend, enum qcow2_discard_type type);

@@ -40,8 +40,10 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
 int qcow2_refcount_init(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
-    int ret, refcount_table_size2, i;
+    unsigned int refcount_table_size2, i;
+    int ret;
 
+    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
     refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
     s->refcount_table = g_malloc(refcount_table_size2);
     if (s->refcount_table_size > 0) {

@@ -87,7 +89,7 @@ static int load_refcount_block(BlockDriverState *bs,
 static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
 {
     BDRVQcowState *s = bs->opaque;
-    int refcount_table_index, block_index;
+    uint64_t refcount_table_index, block_index;
     int64_t refcount_block_offset;
     int ret;
     uint16_t *refcount_block;

@@ -191,10 +193,11 @@ static int alloc_refcount_block(BlockDriverState *bs,
      *   they can describe them themselves.
      *
      * - We need to consider that at this point we are inside update_refcounts
-     *   and doing the initial refcount increase. This means that some clusters
-     *   have already been allocated by the caller, but their refcount isn't
-     *   accurate yet. free_cluster_index tells us where this allocation ends
-     *   as long as we don't overwrite it by freeing clusters.
+     *   and potentially doing an initial refcount increase. This means that
+     *   some clusters have already been allocated by the caller, but their
+     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
+     *   need to return -EAGAIN to signal the caller that it needs to restart
+     *   the search for free clusters.
      *
      * - alloc_clusters_noref and qcow2_free_clusters may load a different
      *   refcount block into the cache

@@ -279,7 +282,10 @@ static int alloc_refcount_block(BlockDriverState *bs,
         }
 
         s->refcount_table[refcount_table_index] = new_block;
-        return 0;
+
+        /* The new refcount block may be where the caller intended to put its
+         * data, so let it restart the search. */
+        return -EAGAIN;
     }
 
     ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);

@@ -302,8 +308,11 @@ static int alloc_refcount_block(BlockDriverState *bs,
 
     /* Calculate the number of refcount blocks needed so far */
     uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
-    uint64_t blocks_used = (s->free_cluster_index +
-        refcount_block_clusters - 1) / refcount_block_clusters;
+    uint64_t blocks_used = DIV_ROUND_UP(cluster_index, refcount_block_clusters);
+
+    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
+        return -EFBIG;
+    }
 
     /* And now we need at least one block more for the new metadata */
     uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);

@@ -336,8 +345,6 @@ static int alloc_refcount_block(BlockDriverState *bs,
     uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size);
     uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t));
 
-    assert(meta_offset >= (s->free_cluster_index * s->cluster_size));
-
     /* Fill the new refcount table */
     memcpy(new_table, s->refcount_table,
            s->refcount_table_size * sizeof(uint64_t));

@@ -400,18 +407,19 @@ static int alloc_refcount_block(BlockDriverState *bs,
     s->refcount_table_size = table_size;
     s->refcount_table_offset = table_offset;
 
-    /* Free old table. Remember, we must not change free_cluster_index */
-    uint64_t old_free_cluster_index = s->free_cluster_index;
+    /* Free old table. */
     qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                         QCOW2_DISCARD_OTHER);
-    s->free_cluster_index = old_free_cluster_index;
 
     ret = load_refcount_block(bs, new_block, (void**) refcount_block);
     if (ret < 0) {
         return ret;
     }
 
-    return 0;
+    /* If we were trying to do the initial refcount update for some cluster
+     * allocation, we might have used the same clusters to store newly
+     * allocated metadata. Make the caller search some new space. */
+    return -EAGAIN;
 
 fail_table:
     g_free(new_table);

@@ -626,15 +634,16 @@ int qcow2_update_cluster_refcount(BlockDriverState *bs,
 
 
 /* return < 0 if error */
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
 {
     BDRVQcowState *s = bs->opaque;
-    int i, nb_clusters, refcount;
+    uint64_t i, nb_clusters;
+    int refcount;
 
     nb_clusters = size_to_clusters(s, size);
 retry:
     for(i = 0; i < nb_clusters; i++) {
-        int64_t next_cluster_index = s->free_cluster_index++;
+        uint64_t next_cluster_index = s->free_cluster_index++;
         refcount = get_refcount(bs, next_cluster_index);
 
         if (refcount < 0) {

@@ -651,18 +660,21 @@ retry:
     return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
 }
 
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
+int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
 {
     int64_t offset;
     int ret;
 
     BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
-    offset = alloc_clusters_noref(bs, size);
-    if (offset < 0) {
-        return offset;
-    }
+    do {
+        offset = alloc_clusters_noref(bs, size);
+        if (offset < 0) {
+            return offset;
+        }
+
+        ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
+    } while (ret == -EAGAIN);
 
-    ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
     if (ret < 0) {
         return ret;
     }

@@ -675,33 +687,36 @@ int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
 {
     BDRVQcowState *s = bs->opaque;
     uint64_t cluster_index;
-    uint64_t old_free_cluster_index;
-    int i, refcount, ret;
+    uint64_t i;
+    int refcount, ret;
 
-    /* Check how many clusters there are free */
-    cluster_index = offset >> s->cluster_bits;
-    for(i = 0; i < nb_clusters; i++) {
-        refcount = get_refcount(bs, cluster_index++);
-
-        if (refcount < 0) {
-            return refcount;
-        } else if (refcount != 0) {
-            break;
-        }
+    assert(nb_clusters >= 0);
+    if (nb_clusters == 0) {
+        return 0;
     }
 
-    /* And then allocate them */
-    old_free_cluster_index = s->free_cluster_index;
-    s->free_cluster_index = cluster_index + i;
+    do {
+        /* Check how many clusters there are free */
+        cluster_index = offset >> s->cluster_bits;
+        for(i = 0; i < nb_clusters; i++) {
+            refcount = get_refcount(bs, cluster_index++);
+
+            if (refcount < 0) {
+                return refcount;
+            } else if (refcount != 0) {
+                break;
+            }
+        }
 
-    ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
-                          QCOW2_DISCARD_NEVER);
+        /* And then allocate them */
+        ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
+                              QCOW2_DISCARD_NEVER);
+    } while (ret == -EAGAIN);
+
     if (ret < 0) {
         return ret;
     }
 
-    s->free_cluster_index = old_free_cluster_index;
-
     return i;
 }

@@ -1004,8 +1019,7 @@ static void inc_refcounts(BlockDriverState *bs,
                           int64_t offset, int64_t size)
 {
     BDRVQcowState *s = bs->opaque;
-    int64_t start, last, cluster_offset;
-    int k;
+    uint64_t start, last, cluster_offset, k;
 
     if (size <= 0)
         return;

@@ -1015,11 +1029,7 @@ static void inc_refcounts(BlockDriverState *bs,
     for(cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
         k = cluster_offset >> s->cluster_bits;
-        if (k < 0) {
-            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
-                cluster_offset);
-            res->corruptions++;
-        } else if (k >= refcount_table_size) {
+        if (k >= refcount_table_size) {
             fprintf(stderr, "Warning: cluster offset=0x%" PRIx64 " is after "
                 "the end of the image file, can't properly check refcounts.\n",
                 cluster_offset);

@@ -1460,14 +1470,19 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix)
 {
     BDRVQcowState *s = bs->opaque;
-    int64_t size, i, highest_cluster;
-    int nb_clusters, refcount1, refcount2;
+    int64_t size, i, highest_cluster, nb_clusters;
+    int refcount1, refcount2;
     QCowSnapshot *sn;
     uint16_t *refcount_table;
     int ret;
 
     size = bdrv_getlength(bs->file);
     nb_clusters = size_to_clusters(s, size);
+    if (nb_clusters > INT_MAX) {
+        res->check_errors++;
+        return -EFBIG;
+    }
+
     refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t));
 
     res->bfi.total_clusters =
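The refcount changes above replace the fragile free_cluster_index save/restore with a contract: if allocating refcount metadata consumed the clusters the caller had just picked, update_refcount()/alloc_refcount_block() return -EAGAIN and the caller re-runs the search. A standalone sketch of that retry loop (stand-in functions, not the qcow2 implementation):

#include <errno.h>
#include <stdio.h>

static int attempts;

static long alloc_noref(void)
{
    return 4096 * (++attempts);     /* pretend each search finds new space */
}

static int update_refcount_sketch(long offset)
{
    (void)offset;
    return (attempts < 2) ? -EAGAIN : 0;  /* first pick gets stolen */
}

static long alloc_clusters_sketch(void)
{
    long offset;
    int ret;

    do {
        offset = alloc_noref();
        if (offset < 0) {
            return offset;
        }
        ret = update_refcount_sketch(offset);
    } while (ret == -EAGAIN);       /* metadata stole our clusters: retry */

    return (ret < 0) ? ret : offset;
}

int main(void)
{
    printf("allocated at %ld after %d attempts\n",
           alloc_clusters_sketch(), attempts);
    return 0;
}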
@@ -26,31 +26,6 @@
 #include "block/block_int.h"
 #include "block/qcow2.h"
 
-typedef struct QEMU_PACKED QCowSnapshotHeader {
-    /* header is 8 byte aligned */
-    uint64_t l1_table_offset;
-
-    uint32_t l1_size;
-    uint16_t id_str_size;
-    uint16_t name_size;
-
-    uint32_t date_sec;
-    uint32_t date_nsec;
-
-    uint64_t vm_clock_nsec;
-
-    uint32_t vm_state_size;
-    uint32_t extra_data_size; /* for extension */
-    /* extra data follows */
-    /* id_str follows */
-    /* name follows */
-} QCowSnapshotHeader;
-
-typedef struct QEMU_PACKED QCowSnapshotExtraData {
-    uint64_t vm_state_size_large;
-    uint64_t disk_size;
-} QCowSnapshotExtraData;
-
 void qcow2_free_snapshots(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
@@ -357,6 +332,10 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
     uint64_t *l1_table = NULL;
     int64_t l1_table_offset;
 
+    if (s->nb_snapshots >= QCOW_MAX_SNAPSHOTS) {
+        return -EFBIG;
+    }
+
     memset(sn, 0, sizeof(*sn));
 
     /* Generate an ID if it wasn't passed */
@@ -694,7 +673,11 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs, const char *snapshot_name)
     sn = &s->snapshots[snapshot_index];
 
     /* Allocate and read in the snapshot's L1 table */
-    new_l1_bytes = s->l1_size * sizeof(uint64_t);
+    if (sn->l1_size > QCOW_MAX_L1_SIZE) {
+        error_report("Snapshot L1 table too large");
+        return -EFBIG;
+    }
+    new_l1_bytes = sn->l1_size * sizeof(uint64_t);
     new_l1_table = g_malloc0(align_offset(new_l1_bytes, 512));
 
     ret = bdrv_pread(bs->file, sn->l1_table_offset, new_l1_table, new_l1_bytes);
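
Note the two bugs fixed in that last hunk: the buffer was sized from the active table (s->l1_size) rather than the snapshot's own sn->l1_size, and the size was never bounded. A standalone sketch of the bound-then-allocate pattern the fix introduces; MAX_L1_ENTRIES is a made-up stand-in for the role QCOW_MAX_L1_SIZE plays above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_L1_ENTRIES (0x2000000 / sizeof(uint64_t))

static uint64_t *alloc_l1(uint32_t l1_size)
{
    /* Cap the untrusted on-disk count before multiplying it into bytes. */
    if (l1_size > MAX_L1_ENTRIES) {
        fprintf(stderr, "L1 table too large: %u entries\n", l1_size);
        return NULL;
    }
    return calloc(l1_size ? l1_size : 1, sizeof(uint64_t));
}

int main(void)
{
    uint64_t *ok  = alloc_l1(1024);        /* plausible header value */
    uint64_t *bad = alloc_l1(UINT32_MAX);  /* hostile header value   */

    printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
    free(ok);
    return bad != NULL;
}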

198 block/qcow2.c

@@ -269,12 +269,15 @@ static int qcow2_mark_clean(BlockDriverState *bs)
     BDRVQcowState *s = bs->opaque;
 
     if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
-        int ret = bdrv_flush(bs);
+        int ret;
+
+        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
+
+        ret = bdrv_flush(bs);
         if (ret < 0) {
             return ret;
         }
 
-        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
         return qcow2_update_header(bs);
     }
     return 0;
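
In both the old and new arrangement the invariant is the same: the header claiming a clean image is only written after a successful flush of everything it vouches for. A toy illustration of that write-then-fsync-then-flag ordering (assumed single-file layout, nothing qcow2-specific):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("image.bin", O_RDWR | O_CREAT, 0644);
    char data[512] = "guest data";
    char clean_marker = 1;

    if (fd < 0) {
        return 1;
    }
    pwrite(fd, data, sizeof(data), 512);   /* payload */
    if (fsync(fd) < 0) {                   /* flush BEFORE claiming clean */
        close(fd);
        return 1;
    }
    pwrite(fd, &clean_marker, 1, 0);       /* header: image is clean */
    fsync(fd);
    close(fd);
    puts("marked clean only after data hit the disk");
    return 0;
}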

@@ -329,6 +332,32 @@ static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
     return ret;
 }
 
+static int validate_table_offset(BlockDriverState *bs, uint64_t offset,
+                                 uint64_t entries, size_t entry_len)
+{
+    BDRVQcowState *s = bs->opaque;
+    uint64_t size;
+
+    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
+     * because values will be passed to qemu functions taking int64_t. */
+    if (entries > INT64_MAX / entry_len) {
+        return -EINVAL;
+    }
+
+    size = entries * entry_len;
+
+    if (INT64_MAX - size < offset) {
+        return -EINVAL;
+    }
+
+    /* Tables must be cluster aligned */
+    if (offset & (s->cluster_size - 1)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
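
The helper rejects entries * entry_len products and offset + size sums that would overflow the int64_t ranges the block layer works in, plus unaligned table offsets. The same discipline as a self-contained test (cluster_size stands in for s->cluster_size):

#include <stdint.h>
#include <stdio.h>

static int validate(uint64_t offset, uint64_t entries, size_t entry_len,
                    uint64_t cluster_size)
{
    if (entries > INT64_MAX / entry_len) {
        return -1;                        /* entries * entry_len overflows */
    }
    if (INT64_MAX - entries * entry_len < offset) {
        return -1;                        /* offset + size overflows */
    }
    if (offset & (cluster_size - 1)) {
        return -1;                        /* not cluster aligned */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", validate(65536, 8192, 8, 65536));   /* 0: fine           */
    printf("%d\n", validate(UINT64_MAX, 1, 8, 65536)); /* -1: sum overflows */
    printf("%d\n", validate(1, 8192, 8, 65536));       /* -1: misaligned    */
    return 0;
}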

static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),

@@ -419,7 +448,8 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
                       Error **errp)
 {
     BDRVQcowState *s = bs->opaque;
-    int len, i, ret = 0;
+    unsigned int len, i;
+    int ret = 0;
     QCowHeader header;
     QemuOpts *opts;
     Error *local_err = NULL;
@@ -460,6 +490,18 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
 
     s->qcow_version = header.version;
 
+    /* Initialise cluster size */
+    if (header.cluster_bits < MIN_CLUSTER_BITS ||
+        header.cluster_bits > MAX_CLUSTER_BITS) {
+        error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    s->cluster_bits = header.cluster_bits;
+    s->cluster_size = 1 << s->cluster_bits;
+    s->cluster_sectors = 1 << (s->cluster_bits - 9);
+
     /* Initialise version 3 header fields */
     if (header.version == 2) {
         header.incompatible_features = 0;
@@ -473,6 +515,18 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
         be64_to_cpus(&header.autoclear_features);
         be32_to_cpus(&header.refcount_order);
         be32_to_cpus(&header.header_length);
+
+        if (header.header_length < 104) {
+            error_setg(errp, "qcow2 header too short");
+            ret = -EINVAL;
+            goto fail;
+        }
+    }
+
+    if (header.header_length > s->cluster_size) {
+        error_setg(errp, "qcow2 header exceeds cluster size");
+        ret = -EINVAL;
+        goto fail;
     }
 
     if (header.header_length > sizeof(header)) {
@@ -487,6 +541,12 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
         }
     }
 
+    if (header.backing_file_offset > s->cluster_size) {
+        error_setg(errp, "Invalid backing file offset");
+        ret = -EINVAL;
+        goto fail;
+    }
+
     if (header.backing_file_offset) {
         ext_end = header.backing_file_offset;
     } else {
@@ -529,12 +589,6 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
     }
     s->refcount_order = header.refcount_order;
 
-    if (header.cluster_bits < MIN_CLUSTER_BITS ||
-        header.cluster_bits > MAX_CLUSTER_BITS) {
-        error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
-        ret = -EINVAL;
-        goto fail;
-    }
     if (header.crypt_method > QCOW_CRYPT_AES) {
         error_setg(errp, "Unsupported encryption method: %i",
                    header.crypt_method);
@@ -545,23 +599,52 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
     if (s->crypt_method_header) {
         bs->encrypted = 1;
     }
-    s->cluster_bits = header.cluster_bits;
-    s->cluster_size = 1 << s->cluster_bits;
-    s->cluster_sectors = 1 << (s->cluster_bits - 9);
 
     s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
     s->l2_size = 1 << s->l2_bits;
     bs->total_sectors = header.size / 512;
     s->csize_shift = (62 - (s->cluster_bits - 8));
     s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
     s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
 
     s->refcount_table_offset = header.refcount_table_offset;
     s->refcount_table_size =
         header.refcount_table_clusters << (s->cluster_bits - 3);
 
-    s->snapshots_offset = header.snapshots_offset;
-    s->nb_snapshots = header.nb_snapshots;
+    if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {
+        error_setg(errp, "Reference count table too large");
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    ret = validate_table_offset(bs, s->refcount_table_offset,
+                                s->refcount_table_size, sizeof(uint64_t));
+    if (ret < 0) {
+        error_setg(errp, "Invalid reference count table offset");
+        goto fail;
+    }
+
+    /* Snapshot table offset/length */
+    if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
+        error_setg(errp, "Too many snapshots");
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    ret = validate_table_offset(bs, header.snapshots_offset,
+                                header.nb_snapshots,
+                                sizeof(QCowSnapshotHeader));
+    if (ret < 0) {
+        error_setg(errp, "Invalid snapshot table offset");
+        goto fail;
+    }
 
     /* read the level 1 table */
+    if (header.l1_size > QCOW_MAX_L1_SIZE) {
+        error_setg(errp, "Active L1 table too large");
+        ret = -EFBIG;
+        goto fail;
+    }
     s->l1_size = header.l1_size;
 
     l1_vm_state_index = size_to_l1(s, header.size);
@@ -579,7 +662,16 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
         ret = -EINVAL;
         goto fail;
     }
+
+    ret = validate_table_offset(bs, header.l1_table_offset,
+                                header.l1_size, sizeof(uint64_t));
+    if (ret < 0) {
+        error_setg(errp, "Invalid L1 table offset");
+        goto fail;
+    }
     s->l1_table_offset = header.l1_table_offset;
+
     if (s->l1_size > 0) {
         s->l1_table = g_malloc0(
             align_offset(s->l1_size * sizeof(uint64_t), 512));
@@ -625,8 +717,10 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
     /* read the backing file name */
     if (header.backing_file_offset != 0) {
         len = header.backing_file_size;
-        if (len > 1023) {
-            len = 1023;
+        if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
+            error_setg(errp, "Backing file name too long");
+            ret = -EINVAL;
+            goto fail;
         }
         ret = bdrv_pread(bs->file, header.backing_file_offset,
                          bs->backing_file, len);
@@ -637,6 +731,10 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
         bs->backing_file[len] = '\0';
     }
 
+    /* Internal snapshots */
+    s->snapshots_offset = header.snapshots_offset;
+    s->nb_snapshots = header.nb_snapshots;
+
     ret = qcow2_read_snapshots(bs);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Could not read snapshots");
@@ -792,11 +890,25 @@ static int qcow2_set_key(BlockDriverState *bs, const char *key)
     return 0;
 }
 
-/* We have nothing to do for QCOW2 reopen, stubs just return
- * success */
+/* We have no actual commit/abort logic for qcow2, but we need to write out any
+ * unwritten data if we reopen read-only. */
 static int qcow2_reopen_prepare(BDRVReopenState *state,
                                 BlockReopenQueue *queue, Error **errp)
 {
+    int ret;
+
+    if ((state->flags & BDRV_O_RDWR) == 0) {
+        ret = bdrv_flush(state->bs);
+        if (ret < 0) {
+            return ret;
+        }
+
+        ret = qcow2_mark_clean(state->bs);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
    return 0;
 }
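
The new prepare hook flushes and marks the image clean whenever a reopen drops write access, since no later write path will get the chance to do it. A sketch of that rule on a toy image state (stand-in names, not the QEMU reopen API):

#include <stdbool.h>
#include <stdio.h>

struct image {
    bool writable;
    bool dirty;
};

static int reopen(struct image *img, bool want_writable)
{
    if (img->writable && !want_writable && img->dirty) {
        /* equivalent of bdrv_flush() + qcow2_mark_clean() */
        img->dirty = false;
        puts("flushed and marked clean before going read-only");
    }
    img->writable = want_writable;
    return 0;
}

int main(void)
{
    struct image img = { .writable = true, .dirty = true };
    reopen(&img, false);
    printf("writable=%d dirty=%d\n", img.writable, img.dirty);
    return 0;
}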

@@ -1471,8 +1583,8 @@ static int qcow2_create2(const char *filename, int64_t total_size,
      * size for any qcow2 image.
      */
     BlockDriverState* bs;
-    QCowHeader header;
-    uint8_t* refcount_table;
+    QCowHeader *header;
+    uint64_t* refcount_table;
     Error *local_err = NULL;
     int ret;
 
@@ -1489,38 +1601,43 @@ static int qcow2_create2(const char *filename, int64_t total_size,
     }
 
     /* Write the header */
-    memset(&header, 0, sizeof(header));
-    header.magic = cpu_to_be32(QCOW_MAGIC);
-    header.version = cpu_to_be32(version);
-    header.cluster_bits = cpu_to_be32(cluster_bits);
-    header.size = cpu_to_be64(0);
-    header.l1_table_offset = cpu_to_be64(0);
-    header.l1_size = cpu_to_be32(0);
-    header.refcount_table_offset = cpu_to_be64(cluster_size);
-    header.refcount_table_clusters = cpu_to_be32(1);
-    header.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT);
-    header.header_length = cpu_to_be32(sizeof(header));
+    QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header));
+    header = g_malloc0(cluster_size);
+    *header = (QCowHeader) {
+        .magic                   = cpu_to_be32(QCOW_MAGIC),
+        .version                 = cpu_to_be32(version),
+        .cluster_bits            = cpu_to_be32(cluster_bits),
+        .size                    = cpu_to_be64(0),
+        .l1_table_offset         = cpu_to_be64(0),
+        .l1_size                 = cpu_to_be32(0),
+        .refcount_table_offset   = cpu_to_be64(cluster_size),
+        .refcount_table_clusters = cpu_to_be32(1),
+        .refcount_order          = cpu_to_be32(3 + REFCOUNT_SHIFT),
+        .header_length           = cpu_to_be32(sizeof(*header)),
+    };
 
     if (flags & BLOCK_FLAG_ENCRYPT) {
-        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
+        header->crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
     } else {
-        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
+        header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
     }
 
     if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) {
-        header.compatible_features |=
+        header->compatible_features |=
             cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
     }
 
-    ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
+    ret = bdrv_pwrite(bs, 0, header, cluster_size);
+    g_free(header);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Could not write qcow2 header");
         goto out;
     }
 
-    /* Write an empty refcount table */
-    refcount_table = g_malloc0(cluster_size);
-    ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
+    /* Write a refcount table with one refcount block */
+    refcount_table = g_malloc0(2 * cluster_size);
+    refcount_table[0] = cpu_to_be64(2 * cluster_size);
+    ret = bdrv_pwrite(bs, cluster_size, refcount_table, 2 * cluster_size);
     g_free(refcount_table);
 
     if (ret < 0) {
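
With this hunk a fresh image reserves three clusters up front (header, refcount table, and one refcount block), and reftable entry 0 points at that block at offset 2 * cluster_size, which is why the allocation in the next hunk grows from 2 to 3 clusters. The layout arithmetic in isolation (64 KiB default clusters assumed):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cluster_size = 65536;            /* default 64 KiB clusters */
    uint64_t header_off   = 0;
    uint64_t reftable_off = 1 * cluster_size; /* written at cluster 1 */
    uint64_t refblock_off = 2 * cluster_size; /* reftable[0] points here */

    printf("header   @ %" PRIu64 "\n", header_off);
    printf("reftable @ %" PRIu64 "\n", reftable_off);
    printf("refblock @ %" PRIu64 "\n", refblock_off);
    printf("initial allocation: %" PRIu64 " bytes (3 clusters)\n",
           3 * cluster_size);
    return 0;
}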

@@ -1544,7 +1661,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
         goto out;
     }
 
-    ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
+    ret = qcow2_alloc_clusters(bs, 3 * cluster_size);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
                          "header and refcount table");
@@ -1588,7 +1705,8 @@ static int qcow2_create2(const char *filename, int64_t total_size,
 
     /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning */
     ret = bdrv_open(bs, filename, NULL,
-                    BDRV_O_RDWR | BDRV_O_CACHE_WB, drv, &local_err);
+                    BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_BACKING,
+                    drv, &local_err);
     if (error_is_set(&local_err)) {
         error_propagate(errp, local_err);
         goto out;

block/qcow2.h

@@ -38,6 +38,15 @@
 #define QCOW_CRYPT_AES 1
 
 #define QCOW_MAX_CRYPT_CLUSTERS 32
+#define QCOW_MAX_SNAPSHOTS 65536
+
+/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_REFTABLE_SIZE 0x800000
+
+/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_L1_SIZE 0x2000000
 
 /* indicate that the refcount of the referenced cluster is exactly one. */
 #define QCOW_OFLAG_COPIED (1ULL << 63)
@@ -97,6 +106,32 @@ typedef struct QCowHeader {
     uint32_t header_length;
 } QEMU_PACKED QCowHeader;
 
+typedef struct QEMU_PACKED QCowSnapshotHeader {
+    /* header is 8 byte aligned */
+    uint64_t l1_table_offset;
+
+    uint32_t l1_size;
+    uint16_t id_str_size;
+    uint16_t name_size;
+
+    uint32_t date_sec;
+    uint32_t date_nsec;
+
+    uint64_t vm_clock_nsec;
+
+    uint32_t vm_state_size;
+    uint32_t extra_data_size; /* for extension */
+    /* extra data follows */
+    /* id_str follows */
+    /* name follows */
+} QCowSnapshotHeader;
+
+typedef struct QEMU_PACKED QCowSnapshotExtraData {
+    uint64_t vm_state_size_large;
+    uint64_t disk_size;
+} QCowSnapshotExtraData;
+
+
 typedef struct QCowSnapshot {
     uint64_t l1_table_offset;
     uint32_t l1_size;
@@ -191,8 +226,8 @@ typedef struct BDRVQcowState {
     uint64_t *refcount_table;
     uint64_t refcount_table_offset;
     uint32_t refcount_table_size;
-    int64_t free_cluster_index;
-    int64_t free_byte_offset;
+    uint64_t free_cluster_index;
+    uint64_t free_byte_offset;
 
     CoMutex lock;
 
@@ -202,7 +237,7 @@ typedef struct BDRVQcowState {
     AES_KEY aes_decrypt_key;
     uint64_t snapshots_offset;
     int snapshots_size;
-    int nb_snapshots;
+    unsigned int nb_snapshots;
     QCowSnapshot *snapshots;
 
     int flags;
@@ -383,6 +418,11 @@ static inline int64_t qcow2_vm_state_offset(BDRVQcowState *s)
     return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
 }
 
+static inline uint64_t qcow2_max_refcount_clusters(BDRVQcowState *s)
+{
+    return QCOW_MAX_REFTABLE_SIZE >> s->cluster_bits;
+}
+
 static inline int qcow2_get_cluster_type(uint64_t l2_entry)
 {
     if (l2_entry & QCOW_OFLAG_COMPRESSED) {
@@ -431,7 +471,7 @@ void qcow2_refcount_close(BlockDriverState *bs);
 int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
                                   int addend, enum qcow2_discard_type type);
 
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size);
+int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
 int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                             int nb_clusters);
 int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
block/sheepdog.c

@@ -2082,6 +2082,7 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
     strncpy(s->inode.tag, sn_info->name, sizeof(s->inode.tag));
     /* we don't need to update entire object */
     datalen = SD_INODE_SIZE - sizeof(s->inode.data_vdi_id);
+    inode = g_malloc(datalen);
 
     /* refresh inode. */
     fd = connect_to_sdog(s);
@@ -2105,8 +2106,6 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
         goto cleanup;
     }
 
-    inode = (SheepdogInode *)g_malloc(datalen);
-
     ret = read_object(fd, (char *)inode, vid_to_vdi_oid(new_vid),
                       s->inode.nr_copies, datalen, 0, s->cache_flags);
 
@@ -2120,6 +2119,7 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
           s->inode.name, s->inode.snap_id, s->inode.vdi_id);
 
 cleanup:
+    g_free(inode);
     closesocket(fd);
     return ret;
 }

31 block/vdi.c
@@ -120,6 +120,11 @@ typedef unsigned char uuid_t[16];
 
 #define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)
 
+/* max blocks in image is (0xffffffff / 4) */
+#define VDI_BLOCKS_IN_IMAGE_MAX 0x3fffffff
+#define VDI_DISK_SIZE_MAX ((uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * \
+                           (uint64_t)DEFAULT_CLUSTER_SIZE)
+
 #if !defined(CONFIG_UUID)
 static inline void uuid_generate(uuid_t out)
 {
@@ -384,6 +389,13 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
     vdi_header_print(&header);
 #endif
 
+    if (header.disk_size > VDI_DISK_SIZE_MAX) {
+        logout("disk size is 0x%" PRIx64 ", max supported is 0x%" PRIx64,
+               header.disk_size, VDI_DISK_SIZE_MAX);
+        ret = -ENOTSUP;
+        goto fail;
+    }
+
     if (header.disk_size % SECTOR_SIZE != 0) {
         /* 'VBoxManage convertfromraw' can create images with odd disk sizes.
            We accept them but round the disk size to the next multiple of
@@ -416,7 +428,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
         logout("unsupported sector size %u B\n", header.sector_size);
         ret = -ENOTSUP;
         goto fail;
-    } else if (header.block_size != 1 * MiB) {
+    } else if (header.block_size != DEFAULT_CLUSTER_SIZE) {
         logout("unsupported block size %u B\n", header.block_size);
         ret = -ENOTSUP;
         goto fail;
@@ -433,6 +445,11 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
         logout("parent uuid != 0, unsupported\n");
         ret = -ENOTSUP;
         goto fail;
+    } else if (header.blocks_in_image > VDI_BLOCKS_IN_IMAGE_MAX) {
+        logout("too many blocks %u, max is %u)",
+               header.blocks_in_image, VDI_BLOCKS_IN_IMAGE_MAX);
+        ret = -ENOTSUP;
+        goto fail;
     }
 
     bs->total_sectors = header.disk_size / SECTOR_SIZE;
@@ -681,11 +698,20 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options,
         options++;
     }
 
+    if (bytes > VDI_DISK_SIZE_MAX) {
+        result = -ENOTSUP;
+        logout("image size (size is 0x%" PRIx64
+               ", max supported is 0x%" PRIx64 ")",
+               bytes, VDI_DISK_SIZE_MAX);
+        goto exit;
+    }
+
     fd = qemu_open(filename,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
                    0644);
     if (fd < 0) {
-        return -errno;
+        result = -errno;
+        goto exit;
     }
 
     /* We need enough blocks to store the given disk size,
@@ -746,6 +772,7 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options,
         result = -errno;
     }
 
+exit:
     return result;
 }

12 block/vhdx.c
@@ -785,12 +785,20 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
     le32_to_cpus(&s->logical_sector_size);
     le32_to_cpus(&s->physical_sector_size);
 
-    if (s->logical_sector_size == 0 || s->params.block_size == 0) {
+    if (s->params.block_size < VHDX_BLOCK_SIZE_MIN ||
+        s->params.block_size > VHDX_BLOCK_SIZE_MAX) {
         ret = -EINVAL;
         goto exit;
     }
 
-    /* both block_size and sector_size are guaranteed powers of 2 */
+    /* only 2 supported sector sizes */
+    if (s->logical_sector_size != 512 && s->logical_sector_size != 4096) {
+        ret = -EINVAL;
+        goto exit;
+    }
+
+    /* Both block_size and sector_size are guaranteed powers of 2, below.
+       Due to range checks above, s->sectors_per_block can never be < 256 */
     s->sectors_per_block = s->params.block_size / s->logical_sector_size;
     s->chunk_ratio = (VHDX_MAX_SECTORS_PER_BLOCK) *
                      (uint64_t)s->logical_sector_size /
@@ -1689,7 +1689,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
     }
     if (backing_file) {
         BlockDriverState *bs = bdrv_new("");
-        ret = bdrv_open(bs, backing_file, NULL, 0, NULL, errp);
+        ret = bdrv_open(bs, backing_file, NULL, BDRV_O_NO_BACKING, NULL, errp);
         if (ret != 0) {
             bdrv_unref(bs);
             return ret;

32 block/vpc.c
@@ -45,6 +45,8 @@ enum vhd_type {
 // Seconds since Jan 1, 2000 0:00:00 (UTC)
 #define VHD_TIMESTAMP_BASE 946684800
 
+#define VHD_MAX_SECTORS (65535LL * 255 * 255)
+
 // always big-endian
 typedef struct vhd_footer {
     char creator[8]; // "conectix"
@@ -164,6 +166,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
     VHDDynDiskHeader *dyndisk_header;
     uint8_t buf[HEADER_SIZE];
     uint32_t checksum;
+    uint64_t computed_size;
     int disk_type = VHD_DYNAMIC;
     int ret;
 
@@ -221,7 +224,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     /* Allow a maximum disk size of approximately 2 TB */
-    if (bs->total_sectors >= 65535LL * 255 * 255) {
+    if (bs->total_sectors >= VHD_MAX_SECTORS) {
         ret = -EFBIG;
         goto fail;
     }
@@ -241,10 +244,31 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     s->block_size = be32_to_cpu(dyndisk_header->block_size);
+    if (!is_power_of_2(s->block_size) || s->block_size < BDRV_SECTOR_SIZE) {
+        error_setg(errp, "Invalid block size %" PRIu32, s->block_size);
+        ret = -EINVAL;
+        goto fail;
+    }
     s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;
 
     s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);
-    s->pagetable = g_malloc(s->max_table_entries * 4);
+
+    if ((bs->total_sectors * 512) / s->block_size > 0xffffffffU) {
+        ret = -EINVAL;
+        goto fail;
+    }
+    if (s->max_table_entries > (VHD_MAX_SECTORS * 512) / s->block_size) {
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    computed_size = (uint64_t) s->max_table_entries * s->block_size;
+    if (computed_size < bs->total_sectors * 512) {
+        ret = -EINVAL;
+        goto fail;
+    }
+
+    s->pagetable = qemu_blockalign(bs, s->max_table_entries * 4);
 
     s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);
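
The new vpc_open() checks make sure the BAT geometry read from the header can actually describe the advertised disk before anything is allocated from it. A standalone model of the three checks (only VHD_MAX_SECTORS is taken from the patch; the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

#define VHD_MAX_SECTORS (65535LL * 255 * 255)

static int check_bat(uint64_t total_sectors, uint32_t block_size,
                     uint32_t max_table_entries)
{
    if ((total_sectors * 512) / block_size > 0xffffffffU) {
        return -1;              /* disk needs more BAT entries than exist */
    }
    if (max_table_entries > (VHD_MAX_SECTORS * 512) / block_size) {
        return -1;              /* table larger than any legal disk */
    }
    if ((uint64_t)max_table_entries * block_size < total_sectors * 512) {
        return -1;              /* table too small for the disk */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", check_bat(4096, 0x200000, 2));     /* 0: consistent */
    printf("%d\n", check_bat(1 << 20, 0x200000, 1));  /* -1: BAT too small */
    return 0;
}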

@@ -297,7 +321,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
     return 0;
 
 fail:
-    g_free(s->pagetable);
+    qemu_vfree(s->pagetable);
 #ifdef CACHE
     g_free(s->pageentry_u8);
 #endif
@@ -819,7 +843,7 @@ static int vpc_has_zero_init(BlockDriverState *bs)
 static void vpc_close(BlockDriverState *bs)
 {
     BDRVVPCState *s = bs->opaque;
-    g_free(s->pagetable);
+    qemu_vfree(s->pagetable);
 #ifdef CACHE
     g_free(s->pageentry_u8);
 #endif

block/vvfat.c
@@ -788,7 +788,9 @@ static int read_directory(BDRVVVFATState* s, int mapping_index)
                 s->current_mapping->path=buffer;
                 s->current_mapping->read_only =
                     (st.st_mode & (S_IWUSR | S_IWGRP | S_IWOTH)) == 0;
             }
+        } else {
+            g_free(buffer);
         }
     }
     closedir(dir);
@@ -1866,7 +1868,7 @@ static int check_directory_consistency(BDRVVVFATState *s,
 
     if (s->used_clusters[cluster_num] & USED_ANY) {
         fprintf(stderr, "cluster %d used more than once\n", (int)cluster_num);
-        return 0;
+        goto fail;
     }
     s->used_clusters[cluster_num] = USED_DIRECTORY;

blockdev-nbd.c
@@ -27,8 +27,9 @@ static void nbd_accept(void *opaque)
     socklen_t addr_len = sizeof(addr);
 
     int fd = accept(server_fd, (struct sockaddr *)&addr, &addr_len);
-    if (fd >= 0) {
-        nbd_client_new(NULL, fd, nbd_client_put);
+    if (fd >= 0 && !nbd_client_new(NULL, fd, nbd_client_put)) {
+        shutdown(fd, 2);
+        close(fd);
     }
 }
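
nbd_client_new() can fail after the socket has already been accepted, and the old code leaked the fd in that case. The generic shape of the fix, with a stand-in for the client constructor:

#include <stdbool.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static bool start_client(int fd)
{
    (void)fd;
    return false;               /* simulate a handshake failure */
}

static void on_accept(int fd)
{
    if (fd >= 0 && !start_client(fd)) {
        shutdown(fd, 2);        /* SHUT_RDWR */
        close(fd);              /* don't leak the socket */
    }
}

int main(void)
{
    on_accept(dup(0));          /* any fd works for the demonstration */
    puts("failed client connection cleaned up");
    return 0;
}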

@@ -91,6 +92,10 @@ void qmp_nbd_server_add(const char *device, bool has_writable, bool writable,
         error_set(errp, QERR_DEVICE_NOT_FOUND, device);
         return;
     }
+    if (!bdrv_is_inserted(bs)) {
+        error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
+        return;
+    }
 
     if (!has_writable) {
         writable = false;

11 blockdev.c
@@ -340,7 +340,7 @@ static DriveInfo *blockdev_init(QDict *bs_opts,
     opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error);
     if (error_is_set(&error)) {
         error_propagate(errp, error);
-        return NULL;
+        goto err_no_opts;
     }
 
     qemu_opts_absorb_qdict(opts, bs_opts, &error);
@@ -544,8 +544,9 @@ err:
     QTAILQ_REMOVE(&drives, dinfo, next);
     g_free(dinfo);
 early_err:
-    QDECREF(bs_opts);
     qemu_opts_del(opts);
+err_no_opts:
+    QDECREF(bs_opts);
     return NULL;
 }
 
@@ -876,6 +877,7 @@ DriveInfo *drive_init(QemuOpts *all_opts, BlockInterfaceType block_default_type)
 
     /* Actual block device init: Functionality shared with blockdev-add */
     dinfo = blockdev_init(bs_opts, type, &local_err);
+    bs_opts = NULL;
     if (dinfo == NULL) {
         if (error_is_set(&local_err)) {
             qerror_report_err(local_err);
@@ -912,6 +914,7 @@ DriveInfo *drive_init(QemuOpts *all_opts, BlockInterfaceType block_default_type)
 
 fail:
     qemu_opts_del(legacy_opts);
+    QDECREF(bs_opts);
     return dinfo;
 }
 
@@ -1795,6 +1798,10 @@ void qmp_block_commit(const char *device,
      */
     BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
 
+    if (!has_speed) {
+        speed = 0;
+    }
+
     /* drain all i/o before commits */
     bdrv_drain_all();

configure
@@ -1357,6 +1357,11 @@ EOF
       pie="no"
     fi
   fi
+
+  if compile_prog "-fno-pie" "-nopie"; then
+    CFLAGS_NOPIE="-fno-pie"
+    LDFLAGS_NOPIE="-nopie"
+  fi
 fi
 
 ##########################################
@@ -3539,6 +3544,11 @@ fi
 
 int128=no
 cat > $TMPC << EOF
+#if defined(__clang_major__) && defined(__clang_minor__)
+# if ((__clang_major__ < 3) || (__clang_major__ == 3) && (__clang_minor__ < 2))
+#  error __int128_t does not work in CLANG before 3.2
+# endif
+#endif
 __int128_t a;
 __uint128_t b;
 int main (void) {
@@ -4312,6 +4322,7 @@ echo "LD=$ld" >> $config_host_mak
 echo "WINDRES=$windres" >> $config_host_mak
 echo "LIBTOOL=$libtool" >> $config_host_mak
 echo "CFLAGS=$CFLAGS" >> $config_host_mak
+echo "CFLAGS_NOPIE=$CFLAGS_NOPIE" >> $config_host_mak
 echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak
 echo "QEMU_INCLUDES=$QEMU_INCLUDES" >> $config_host_mak
 if test "$sparse" = "yes" ; then
@@ -4325,6 +4336,7 @@ else
   echo "AUTOCONF_HOST := " >> $config_host_mak
 fi
 echo "LDFLAGS=$LDFLAGS" >> $config_host_mak
+echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak
 echo "LIBTOOLFLAGS=$LIBTOOLFLAGS" >> $config_host_mak
 echo "LIBS+=$LIBS" >> $config_host_mak
 echo "LIBS_TOOLS+=$libs_tools" >> $config_host_mak

coroutine-win32.c
@@ -36,8 +36,17 @@ typedef struct
 static __thread CoroutineWin32 leader;
 static __thread Coroutine *current;
 
-CoroutineAction qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
-                                      CoroutineAction action)
+/* This function is marked noinline to prevent GCC from inlining it
+ * into coroutine_trampoline(). If we allow it to do that then it
+ * hoists the code to get the address of the TLS variable "current"
+ * out of the while() loop. This is an invalid transformation because
+ * the SwitchToFiber() call may be called when running thread A but
+ * return in thread B, and so we might be in a different thread
+ * context each time round the loop.
+ */
+CoroutineAction __attribute__((noinline))
+qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
+                      CoroutineAction action)
 {
     CoroutineWin32 *from = DO_UPCAST(CoroutineWin32, base, from_);
     CoroutineWin32 *to = DO_UPCAST(CoroutineWin32, base, to_);

6 cputlb.c

@@ -344,8 +344,10 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 }
 
 #define MMUSUFFIX _cmmu
-#undef GETPC
-#define GETPC() ((uintptr_t)0)
+#undef GETPC_ADJ
+#define GETPC_ADJ 0
+#undef GETRA
+#define GETRA() ((uintptr_t)0)
 #define SOFTMMU_CODE_ACCESS
 
 #define SHIFT 0

docs/migration.txt
@@ -139,7 +139,6 @@ static const VMStateDescription vmstate_kbd = {
     .name = "pckbd",
     .version_id = 3,
     .minimum_version_id = 3,
-    .minimum_version_id_old = 3,
     .fields = (VMStateField []) {
         VMSTATE_UINT8(write_cmd, KBDState),
         VMSTATE_UINT8(status, KBDState),
@@ -168,12 +167,13 @@ You can see that there are several version fields:
 - minimum_version_id: the minimum version_id that VMState is able to understand
   for that device.
 - minimum_version_id_old: For devices that were not able to port to vmstate, we can
-  assign a function that knows how to read this old state.
+  assign a function that knows how to read this old state. This field is
+  ignored if there is no load_state_old handler.
 
 So, VMState is able to read versions from minimum_version_id to
-version_id. And the function load_state_old() is able to load state
-from minimum_version_id_old to minimum_version_id. This function is
-deprecated and will be removed when no more users are left.
+version_id. And the function load_state_old() (if present) is able to
+load state from minimum_version_id_old to minimum_version_id. This
+function is deprecated and will be removed when no more users are left.
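
Putting the fields together, a device that saves version 4 and can still parse version 2 streams would look roughly like this (MyDevState and its fields are hypothetical, shown in the same fragment style as the examples above):

static const VMStateDescription vmstate_mydev = {
    .name = "mydev",
    .version_id = 4,             /* version written on save */
    .minimum_version_id = 2,     /* oldest stream vmstate parses itself */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, MyDevState),
        VMSTATE_UINT8(status, MyDevState),
        VMSTATE_END_OF_LIST()
    }
};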

=== Massaging functions ===

@@ -255,7 +255,6 @@ const VMStateDescription vmstate_ide_drive_pio_state = {
     .name = "ide_drive/pio_state",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .pre_save = ide_drive_pio_pre_save,
     .post_load = ide_drive_pio_post_load,
     .fields = (VMStateField []) {
@@ -275,7 +274,6 @@ const VMStateDescription vmstate_ide_drive = {
     .name = "ide_drive",
     .version_id = 3,
     .minimum_version_id = 0,
-    .minimum_version_id_old = 0,
     .post_load = ide_drive_post_load,
     .fields = (VMStateField []) {
         .... several fields ....

230 exec.c
@@ -83,20 +83,37 @@ int use_icount;
 typedef struct PhysPageEntry PhysPageEntry;
 
 struct PhysPageEntry {
-    uint16_t is_leaf : 1;
-    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
+    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
+    uint16_t skip : 1;
+    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
     uint16_t ptr : 15;
 };
 
-typedef PhysPageEntry Node[L2_SIZE];
+/* Size of the L2 (and L3, etc) page tables. */
+#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
+
+#define P_L2_BITS 10
+#define P_L2_SIZE (1 << P_L2_BITS)
+
+#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
+
+typedef PhysPageEntry Node[P_L2_SIZE];
+
+typedef struct PhysPageMap {
+    unsigned sections_nb;
+    unsigned sections_nb_alloc;
+    unsigned nodes_nb;
+    unsigned nodes_nb_alloc;
+    Node *nodes;
+    MemoryRegionSection *sections;
+} PhysPageMap;
 
 struct AddressSpaceDispatch {
     /* This is a multi-level map on the physical address space.
      * The bottom level has pointers to MemoryRegionSections.
      */
     PhysPageEntry phys_map;
-    Node *nodes;
-    MemoryRegionSection *sections;
+    PhysPageMap map;
     AddressSpace *as;
 };
 
@@ -113,18 +130,6 @@ typedef struct subpage_t {
 #define PHYS_SECTION_ROM 2
 #define PHYS_SECTION_WATCH 3
 
-typedef struct PhysPageMap {
-    unsigned sections_nb;
-    unsigned sections_nb_alloc;
-    unsigned nodes_nb;
-    unsigned nodes_nb_alloc;
-    Node *nodes;
-    MemoryRegionSection *sections;
-} PhysPageMap;
-
-static PhysPageMap *prev_map;
-static PhysPageMap next_map;
-
 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
 
 static void io_mem_init(void);
@@ -135,63 +140,60 @@ static MemoryRegion io_mem_watch;
 
 #if !defined(CONFIG_USER_ONLY)
 
-static void phys_map_node_reserve(unsigned nodes)
+static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
 {
-    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
-        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
-                                      16);
-        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
-                                      next_map.nodes_nb + nodes);
-        next_map.nodes = g_renew(Node, next_map.nodes,
-                                 next_map.nodes_nb_alloc);
+    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
+        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
+        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
+        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
     }
 }
 
-static uint16_t phys_map_node_alloc(void)
+static uint16_t phys_map_node_alloc(PhysPageMap *map)
 {
     unsigned i;
     uint16_t ret;
 
-    ret = next_map.nodes_nb++;
+    ret = map->nodes_nb++;
     assert(ret != PHYS_MAP_NODE_NIL);
-    assert(ret != next_map.nodes_nb_alloc);
-    for (i = 0; i < L2_SIZE; ++i) {
-        next_map.nodes[ret][i].is_leaf = 0;
-        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
+    assert(ret != map->nodes_nb_alloc);
+    for (i = 0; i < P_L2_SIZE; ++i) {
+        map->nodes[ret][i].skip = 1;
+        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
     }
     return ret;
 }
 
-static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
-                                hwaddr *nb, uint16_t leaf,
+static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
+                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                 int level)
 {
     PhysPageEntry *p;
     int i;
-    hwaddr step = (hwaddr)1 << (level * L2_BITS);
+    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
 
-    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
-        lp->ptr = phys_map_node_alloc();
-        p = next_map.nodes[lp->ptr];
+    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
+        lp->ptr = phys_map_node_alloc(map);
+        p = map->nodes[lp->ptr];
         if (level == 0) {
-            for (i = 0; i < L2_SIZE; i++) {
-                p[i].is_leaf = 1;
+            for (i = 0; i < P_L2_SIZE; i++) {
+                p[i].skip = 0;
                 p[i].ptr = PHYS_SECTION_UNASSIGNED;
             }
         }
     } else {
-        p = next_map.nodes[lp->ptr];
+        p = map->nodes[lp->ptr];
     }
-    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
+    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
 
-    while (*nb && lp < &p[L2_SIZE]) {
+    while (*nb && lp < &p[P_L2_SIZE]) {
         if ((*index & (step - 1)) == 0 && *nb >= step) {
-            lp->is_leaf = true;
+            lp->skip = 0;
             lp->ptr = leaf;
             *index += step;
             *nb -= step;
         } else {
-            phys_page_set_level(lp, index, nb, leaf, level - 1);
+            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
         }
         ++lp;
     }
@@ -202,23 +204,24 @@ static void phys_page_set(AddressSpaceDispatch *d,
                           uint16_t leaf)
 {
     /* Wildly overreserve - it doesn't matter much. */
-    phys_map_node_reserve(3 * P_L2_LEVELS);
+    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
 
-    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
+    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
 }
 
-static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
+static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                            Node *nodes, MemoryRegionSection *sections)
 {
     PhysPageEntry *p;
+    hwaddr index = addr >> TARGET_PAGE_BITS;
     int i;
 
-    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
+    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
         if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
         }
         p = nodes[lp.ptr];
-        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
+        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
     }
     return &sections[lp.ptr];
 }
@@ -236,11 +239,10 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
     MemoryRegionSection *section;
     subpage_t *subpage;
 
-    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
-                             d->nodes, d->sections);
+    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
-        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
+        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
     }
     return section;
 }
@@ -264,6 +266,18 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
     return section;
 }
 
+static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
+{
+    if (memory_region_is_ram(mr)) {
+        return !(is_write && mr->readonly);
+    }
+    if (memory_region_is_romd(mr)) {
+        return !is_write;
+    }
+
+    return false;
+}
+
 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                       hwaddr *xlat, hwaddr *plen,
                                       bool is_write)
@@ -293,6 +307,11 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
         as = iotlb.target_as;
     }
 
+    if (memory_access_is_direct(mr, is_write)) {
+        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
+        len = MIN(page, len);
+    }
+
     *plen = len;
     *xlat = addr;
     return mr;
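
The added clamp keeps a direct RAM access from spilling past the page whose translation was just resolved. The arithmetic on its own (4 KiB pages assumed):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static uint64_t clamp_to_page(uint64_t addr, uint64_t len)
{
    /* bytes remaining from addr to the end of its page */
    uint64_t page = ((addr & PAGE_MASK) + PAGE_SIZE) - addr;
    return len < page ? len : page;
}

int main(void)
{
    /* 100 bytes starting 40 bytes before a page boundary -> 40 bytes */
    printf("%" PRIu64 "\n", clamp_to_page(2 * PAGE_SIZE - 40, 100));
    /* wholly inside one page -> unchanged */
    printf("%" PRIu64 "\n", clamp_to_page(PAGE_SIZE + 16, 64));
    return 0;
}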

@@ -708,7 +727,7 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
             iotlb |= PHYS_SECTION_ROM;
         }
     } else {
-        iotlb = section - address_space_memory.dispatch->sections;
+        iotlb = section - address_space_memory.dispatch->map.sections;
         iotlb += xlat;
     }
 
@@ -747,23 +766,23 @@ void phys_mem_set_alloc(void *(*alloc)(size_t))
     phys_mem_alloc = alloc;
 }
 
-static uint16_t phys_section_add(MemoryRegionSection *section)
+static uint16_t phys_section_add(PhysPageMap *map,
+                                 MemoryRegionSection *section)
 {
     /* The physical section number is ORed with a page-aligned
      * pointer to produce the iotlb entries. Thus it should
      * never overflow into the page-aligned value.
     */
-    assert(next_map.sections_nb < TARGET_PAGE_SIZE);
+    assert(map->sections_nb < TARGET_PAGE_SIZE);
 
-    if (next_map.sections_nb == next_map.sections_nb_alloc) {
-        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
-                                         16);
-        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
-                                    next_map.sections_nb_alloc);
+    if (map->sections_nb == map->sections_nb_alloc) {
+        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
+        map->sections = g_renew(MemoryRegionSection, map->sections,
+                                map->sections_nb_alloc);
     }
-    next_map.sections[next_map.sections_nb] = *section;
+    map->sections[map->sections_nb] = *section;
     memory_region_ref(section->mr);
-    return next_map.sections_nb++;
+    return map->sections_nb++;
 }
 
 static void phys_section_destroy(MemoryRegion *mr)
@@ -785,7 +804,6 @@ static void phys_sections_free(PhysPageMap *map)
     }
     g_free(map->sections);
     g_free(map->nodes);
-    g_free(map);
 }
 
 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
@@ -793,8 +811,8 @@ static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *secti
     subpage_t *subpage;
     hwaddr base = section->offset_within_address_space
         & TARGET_PAGE_MASK;
-    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
-                                                   next_map.nodes, next_map.sections);
+    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
+                                                   d->map.nodes, d->map.sections);
     MemoryRegionSection subsection = {
         .offset_within_address_space = base,
         .size = int128_make64(TARGET_PAGE_SIZE),
@@ -807,13 +825,14 @@ static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *secti
         subpage = subpage_init(d->as, base);
         subsection.mr = &subpage->iomem;
         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
-                      phys_section_add(&subsection));
+                      phys_section_add(&d->map, &subsection));
     } else {
         subpage = container_of(existing->mr, subpage_t, iomem);
     }
     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
     end = start + int128_get64(section->size) - 1;
-    subpage_register(subpage, start, end, phys_section_add(section));
+    subpage_register(subpage, start, end,
+                     phys_section_add(&d->map, section));
 }
 
 
@@ -821,7 +840,7 @@ static void register_multipage(AddressSpaceDispatch *d,
                                MemoryRegionSection *section)
 {
     hwaddr start_addr = section->offset_within_address_space;
-    uint16_t section_index = phys_section_add(section);
+    uint16_t section_index = phys_section_add(&d->map, section);
     uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                     TARGET_PAGE_BITS));
 
@@ -1605,7 +1624,7 @@ static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
     return mmio;
 }
 
-static uint16_t dummy_section(MemoryRegion *mr)
+static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
 {
     MemoryRegionSection section = {
         .mr = mr,
@@ -1614,12 +1633,13 @@ static uint16_t dummy_section(MemoryRegion *mr)
         .size = int128_2_64(),
     };
 
-    return phys_section_add(&section);
+    return phys_section_add(map, &section);
 }
 
 MemoryRegion *iotlb_to_region(hwaddr index)
 {
-    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
+    return address_space_memory.dispatch->map.sections[
+        index & ~TARGET_PAGE_MASK].mr;
 }
 
 static void io_mem_init(void)
@@ -1636,9 +1656,19 @@ static void io_mem_init(void)
 static void mem_begin(MemoryListener *listener)
 {
     AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
-    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
+    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
+    uint16_t n;
 
-    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
+    n = dummy_section(&d->map, &io_mem_unassigned);
+    assert(n == PHYS_SECTION_UNASSIGNED);
+    n = dummy_section(&d->map, &io_mem_notdirty);
+    assert(n == PHYS_SECTION_NOTDIRTY);
+    n = dummy_section(&d->map, &io_mem_rom);
+    assert(n == PHYS_SECTION_ROM);
+    n = dummy_section(&d->map, &io_mem_watch);
+    assert(n == PHYS_SECTION_WATCH);
+
+    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
     d->as = as;
     as->next_dispatch = d;
 }
@@ -1649,37 +1679,12 @@ static void mem_commit(MemoryListener *listener)
     AddressSpaceDispatch *cur = as->dispatch;
     AddressSpaceDispatch *next = as->next_dispatch;
 
-    next->nodes = next_map.nodes;
-    next->sections = next_map.sections;
-
     as->dispatch = next;
-    g_free(cur);
-}
-
-static void core_begin(MemoryListener *listener)
-{
-    uint16_t n;
-
-    prev_map = g_new(PhysPageMap, 1);
-    *prev_map = next_map;
-
-    memset(&next_map, 0, sizeof(next_map));
-    n = dummy_section(&io_mem_unassigned);
-    assert(n == PHYS_SECTION_UNASSIGNED);
-    n = dummy_section(&io_mem_notdirty);
-    assert(n == PHYS_SECTION_NOTDIRTY);
-    n = dummy_section(&io_mem_rom);
-    assert(n == PHYS_SECTION_ROM);
-    n = dummy_section(&io_mem_watch);
-    assert(n == PHYS_SECTION_WATCH);
-}
-
-/* This listener's commit run after the other AddressSpaceDispatch listeners'.
- * All AddressSpaceDispatch instances have switched to the next map.
- */
-static void core_commit(MemoryListener *listener)
-{
-    phys_sections_free(prev_map);
+    if (cur) {
+        phys_sections_free(&cur->map);
+        g_free(cur);
+    }
 }
 
 static void tcg_commit(MemoryListener *listener)
@@ -1707,8 +1712,6 @@ static void core_log_global_stop(MemoryListener *listener)
 }
 
 static MemoryListener core_memory_listener = {
-    .begin = core_begin,
-    .commit = core_commit,
     .log_global_start = core_log_global_start,
     .log_global_stop = core_log_global_stop,
     .priority = 1,
@@ -1743,7 +1746,12 @@ void address_space_destroy_dispatch(AddressSpace *as)
 static void memory_map_init(void)
 {
     system_memory = g_malloc(sizeof(*system_memory));
-    memory_region_init(system_memory, NULL, "system", INT64_MAX);
+
+    assert(ADDR_SPACE_BITS <= 64);
+
+    memory_region_init(system_memory, NULL, "system",
+                       ADDR_SPACE_BITS == 64 ?
+                       UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
     address_space_init(&address_space_memory, system_memory, "memory");
 
     system_io = g_malloc(sizeof(*system_io));
@@ -1824,18 +1832,6 @@ static void invalidate_and_set_dirty(hwaddr addr,
     xen_modified_memory(addr, length);
 }
 
-static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
-{
-    if (memory_region_is_ram(mr)) {
-        return !(is_write && mr->readonly);
-    }
-    if (memory_region_is_romd(mr)) {
-        return !is_write;
-    }
-
-    return false;
-}
-
 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
 {
     unsigned access_size_max = mr->ops->valid.max_access_size;
@@ -75,9 +75,18 @@ static struct keymap map[0xE0] = {
     [0x2c] = {4,3}, /* z */
     [0xc7] = {5,0}, /* Home */
     [0x2a] = {5,1}, /* shift */
-    [0x39] = {5,2}, /* space */
-    [0x1c] = {5,5}, /* enter */
+    /*
+     * There are two matrix positions which map to space,
+     * but QEMU can only use one of them for the reverse
+     * mapping, so simply use the second one.
+     */
+    /* [0x39] = {5,2}, space */
+    [0x39] = {5,3}, /* space */
+    /*
+     * Matrix position {5,4} and other keys are missing here.
+     * TODO: Compare with Linux code and test real hardware.
+     */
+    [0x1c] = {5,5}, /* enter (TODO: might be wrong) */
     [0xc8] = {6,0}, /* up */
     [0xd0] = {6,1}, /* down */
     [0xcb] = {6,2}, /* left */
hw/arm/musicpal.c

@@ -92,8 +92,6 @@
 #define MP_ETH_CRDP3            0x4AC
 #define MP_ETH_CTDP0            0x4E0
 #define MP_ETH_CTDP1            0x4E4
-#define MP_ETH_CTDP2            0x4E8
-#define MP_ETH_CTDP3            0x4EC
 
 /* MII PHY access */
 #define MP_ETH_SMIR_DATA        0x0000FFFF
@@ -308,7 +306,7 @@ static uint64_t mv88w8618_eth_read(void *opaque, hwaddr offset,
     case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
         return s->rx_queue[(offset - MP_ETH_CRDP0)/4];
 
-    case MP_ETH_CTDP0 ... MP_ETH_CTDP3:
+    case MP_ETH_CTDP0 ... MP_ETH_CTDP1:
        return s->tx_queue[(offset - MP_ETH_CTDP0)/4];
 
     default:
@@ -362,7 +360,7 @@ static void mv88w8618_eth_write(void *opaque, hwaddr offset,
         s->cur_rx[(offset - MP_ETH_CRDP0)/4] = value;
         break;
 
-    case MP_ETH_CTDP0 ... MP_ETH_CTDP3:
+    case MP_ETH_CTDP0 ... MP_ETH_CTDP1:
         s->tx_queue[(offset - MP_ETH_CTDP0)/4] = value;
         break;
     }

hw/arm/omap1.c
@@ -172,7 +172,7 @@ static void omap_timer_clk_update(void *opaque, int line, int on)
 static void omap_timer_clk_setup(struct omap_mpu_timer_s *timer)
 {
     omap_clk_adduser(timer->clk,
-                     qemu_allocate_irqs(omap_timer_clk_update, timer, 1)[0]);
+                     qemu_allocate_irq(omap_timer_clk_update, timer, 0));
     timer->rate = omap_clk_getrate(timer->clk);
 }
 
@@ -2094,7 +2094,7 @@ static struct omap_mpuio_s *omap_mpuio_init(MemoryRegion *memory,
                           "omap-mpuio", 0x800);
     memory_region_add_subregion(memory, base, &s->iomem);
 
-    omap_clk_adduser(clk, qemu_allocate_irqs(omap_mpuio_onoff, s, 1)[0]);
+    omap_clk_adduser(clk, qemu_allocate_irq(omap_mpuio_onoff, s, 0));
 
     return s;
 }
@@ -2397,7 +2397,7 @@ static struct omap_pwl_s *omap_pwl_init(MemoryRegion *system_memory,
                           "omap-pwl", 0x800);
     memory_region_add_subregion(system_memory, base, &s->iomem);
 
-    omap_clk_adduser(clk, qemu_allocate_irqs(omap_pwl_clk_update, s, 1)[0]);
+    omap_clk_adduser(clk, qemu_allocate_irq(omap_pwl_clk_update, s, 0));
     return s;
 }
 
@@ -3481,8 +3481,8 @@ static void omap_mcbsp_i2s_start(void *opaque, int line, int level)
 void omap_mcbsp_i2s_attach(struct omap_mcbsp_s *s, I2SCodec *slave)
 {
     s->codec = slave;
-    slave->rx_swallow = qemu_allocate_irqs(omap_mcbsp_i2s_swallow, s, 1)[0];
-    slave->tx_start = qemu_allocate_irqs(omap_mcbsp_i2s_start, s, 1)[0];
+    slave->rx_swallow = qemu_allocate_irq(omap_mcbsp_i2s_swallow, s, 0);
+    slave->tx_start = qemu_allocate_irq(omap_mcbsp_i2s_start, s, 0);
 }
 
 /* LED Pulse Generators */
@@ -3630,7 +3630,7 @@ static struct omap_lpg_s *omap_lpg_init(MemoryRegion *system_memory,
     memory_region_init_io(&s->iomem, NULL, &omap_lpg_ops, s, "omap-lpg", 0x800);
     memory_region_add_subregion(system_memory, base, &s->iomem);
 
-    omap_clk_adduser(clk, qemu_allocate_irqs(omap_lpg_clk_update, s, 1)[0]);
+    omap_clk_adduser(clk, qemu_allocate_irq(omap_lpg_clk_update, s, 0));
 
     return s;
 }
@@ -3844,7 +3844,7 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *system_memory,
     s->sdram_size = sdram_size;
     s->sram_size = OMAP15XX_SRAM_SIZE;
 
-    s->wakeup = qemu_allocate_irqs(omap_mpu_wakeup, s, 1)[0];
+    s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0);
 
     /* Clocks */
     omap_clk_init(s);

hw/arm/omap2.c

@@ -2260,7 +2260,7 @@ struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sysmem,
     s->sdram_size = sdram_size;
     s->sram_size = OMAP242X_SRAM_SIZE;
 
-    s->wakeup = qemu_allocate_irqs(omap_mpu_wakeup, s, 1)[0];
+    s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0);
 
     /* Clocks */
     omap_clk_init(s);

hw/arm/pxa2xx.c

@@ -742,7 +742,7 @@ static void pxa2xx_ssp_save(QEMUFile *f, void *opaque)
 static int pxa2xx_ssp_load(QEMUFile *f, void *opaque, int version_id)
 {
     PXA2xxSSPState *s = (PXA2xxSSPState *) opaque;
-    int i;
+    int i, v;
 
     s->enable = qemu_get_be32(f);
 
@@ -756,7 +756,11 @@ static int pxa2xx_ssp_load(QEMUFile *f, void *opaque, int version_id)
     qemu_get_8s(f, &s->ssrsa);
     qemu_get_8s(f, &s->ssacd);
 
-    s->rx_level = qemu_get_byte(f);
+    v = qemu_get_byte(f);
+    if (v < 0 || v > ARRAY_SIZE(s->rx_fifo)) {
+        return -EINVAL;
+    }
+    s->rx_level = v;
     s->rx_start = 0;
     for (i = 0; i < s->rx_level; i ++)
         s->rx_fifo[i] = qemu_get_byte(f);
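
The ssp load fix is classic incoming-migration hardening: a byte read from the stream is range-checked before it becomes a loop bound over a fixed-size FIFO. The same idea as a self-contained function:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE 16

static int load_fifo(const uint8_t *wire, int wire_len, uint8_t *fifo)
{
    int level, i;

    if (wire_len < 1) {
        return -EINVAL;
    }
    level = wire[0];
    if (level > FIFO_SIZE || level > wire_len - 1) {
        return -EINVAL;         /* reject rather than overflow fifo[] */
    }
    for (i = 0; i < level; i++) {
        fifo[i] = wire[1 + i];
    }
    return level;
}

int main(void)
{
    uint8_t fifo[FIFO_SIZE];
    uint8_t good[] = { 3, 0xaa, 0xbb, 0xcc };
    uint8_t evil[] = { 200 };

    printf("good: %d\n", load_fifo(good, sizeof(good), fifo));
    printf("evil: %d\n", load_fifo(evil, sizeof(evil), fifo));
    return 0;
}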
|
||||
|
@ -2053,7 +2057,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space,
|
|||
fprintf(stderr, "Unable to find CPU definition\n");
|
||||
exit(1);
|
||||
}
|
||||
s->reset = qemu_allocate_irqs(pxa2xx_reset, s, 1)[0];
|
||||
s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0);
|
||||
|
||||
/* SDRAM & Internal Memory Storage */
|
||||
memory_region_init_ram(&s->sdram, NULL, "pxa270.sdram", sdram_size);
|
||||
|
@ -2184,7 +2188,7 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
|
|||
fprintf(stderr, "Unable to find CPU definition\n");
|
||||
exit(1);
|
||||
}
|
||||
s->reset = qemu_allocate_irqs(pxa2xx_reset, s, 1)[0];
|
||||
s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0);
|
||||
|
||||
/* SDRAM & Internal Memory Storage */
|
||||
memory_region_init_ram(&s->sdram, NULL, "pxa255.sdram", sdram_size);
|
||||
|
|
|
@ -743,7 +743,7 @@ static void spitz_i2c_setup(PXA2xxState *cpu)
|
|||
|
||||
spitz_wm8750_addr(wm, 0, 0);
|
||||
qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_WM,
|
||||
qemu_allocate_irqs(spitz_wm8750_addr, wm, 1)[0]);
|
||||
qemu_allocate_irq(spitz_wm8750_addr, wm, 0));
|
||||
/* .. and to the sound interface. */
|
||||
cpu->i2s->opaque = wm;
|
||||
cpu->i2s->codec_out = wm8750_dac_dat;
|
||||
|
@ -849,7 +849,7 @@ static void spitz_gpio_setup(PXA2xxState *cpu, int slots)
|
|||
* wouldn't guarantee that a guest ever exits the loop.
|
||||
*/
|
||||
spitz_hsync = 0;
|
||||
lcd_hsync = qemu_allocate_irqs(spitz_lcd_hsync_handler, cpu, 1)[0];
|
||||
lcd_hsync = qemu_allocate_irq(spitz_lcd_hsync_handler, cpu, 0);
|
||||
pxa2xx_gpio_read_notifier(cpu->gpio, lcd_hsync);
|
||||
pxa2xx_lcd_vsync_notifier(cpu->lcd, lcd_hsync);
|
||||
|
||||
|
|
|
@ -359,7 +359,7 @@ static void z2_init(QEMUMachineInitArgs *args)
|
|||
wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s);
|
||||
|
||||
qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS,
|
||||
qemu_allocate_irqs(z2_lcd_cs, z2_lcd, 1)[0]);
|
||||
qemu_allocate_irq(z2_lcd_cs, z2_lcd, 0));
|
||||
|
||||
z2_binfo.kernel_filename = kernel_filename;
|
||||
z2_binfo.kernel_cmdline = kernel_cmdline;
|
||||
|
|
|
@ -347,8 +347,8 @@ static void adlib_realizefn (DeviceState *dev, Error **errp)
|
|||
s->samples = AUD_get_buffer_size_out (s->voice) >> SHIFT;
|
||||
s->mixbuf = g_malloc0 (s->samples << SHIFT);
|
||||
|
||||
adlib_portio_list[1].offset = s->port;
|
||||
adlib_portio_list[2].offset = s->port + 8;
|
||||
adlib_portio_list[0].offset = s->port;
|
||||
adlib_portio_list[1].offset = s->port + 8;
|
||||
portio_list_init (port_list, OBJECT(s), adlib_portio_list, s, "adlib");
|
||||
portio_list_add (port_list, isa_address_space_io(&s->parent_obj), 0);
|
||||
}
|
||||
|
|
|
@@ -444,6 +444,7 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
         }
     }
     if (d->dp_lbase & 0x01) {
+        s = st - d->st;
         addr = intel_hda_addr(d->dp_lbase & ~0x01, d->dp_ubase);
         stl_le_pci_dma(&d->pci, addr + 8*s, st->lpib);
     }
@@ -728,20 +728,18 @@ static int virtio_blk_device_init(VirtIODevice *vdev)
     return 0;
 }
 
-static int virtio_blk_device_exit(DeviceState *dev)
+static void virtio_blk_device_exit(VirtIODevice *vdev)
 {
-    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
-    VirtIOBlock *s = VIRTIO_BLK(dev);
+    VirtIOBlock *s = VIRTIO_BLK(vdev);
 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
     remove_migration_state_change_notifier(&s->migration_state_notifier);
     virtio_blk_data_plane_destroy(s->dataplane);
     s->dataplane = NULL;
 #endif
     qemu_del_vm_change_state_handler(s->change);
-    unregister_savevm(dev, "virtio-blk", s);
+    unregister_savevm(DEVICE(vdev), "virtio-blk", s);
     blockdev_mark_auto_del(s->bs);
     virtio_cleanup(vdev);
-    return 0;
 }
 
 static Property virtio_blk_properties[] = {
@@ -753,10 +751,10 @@ static void virtio_blk_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = virtio_blk_device_exit;
     dc->props = virtio_blk_properties;
     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
     vdc->init = virtio_blk_device_init;
+    vdc->exit = virtio_blk_device_exit;
     vdc->get_config = virtio_blk_update_config;
     vdc->set_config = virtio_blk_set_config;
     vdc->get_features = virtio_blk_get_features;
@@ -670,6 +670,7 @@ static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id)
     uint32_t max_nr_ports, nr_active_ports, ports_map;
     unsigned int i;
     int ret;
+    uint32_t tmp;
 
     if (version_id > 3) {
         return -EINVAL;
@@ -685,17 +686,12 @@ static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id)
         return 0;
     }
 
-    /* The config space */
-    qemu_get_be16s(f, &s->config.cols);
-    qemu_get_be16s(f, &s->config.rows);
-
-    qemu_get_be32s(f, &max_nr_ports);
-    tswap32s(&max_nr_ports);
-    if (max_nr_ports > tswap32(s->config.max_nr_ports)) {
-        /* Source could have had more ports than us. Fail migration. */
-        return -EINVAL;
-    }
+    /* Unused */
+    qemu_get_be16s(f, (uint16_t *) &tmp);
+    qemu_get_be16s(f, (uint16_t *) &tmp);
+    qemu_get_be32s(f, &tmp);
 
+    max_nr_ports = tswap32(s->config.max_nr_ports);
     for (i = 0; i < (max_nr_ports + 31) / 32; i++) {
         qemu_get_be32s(f, &ports_map);
@@ -987,12 +983,11 @@ static const TypeInfo virtio_serial_port_type_info = {
     .class_init = virtio_serial_port_class_init,
 };
 
-static int virtio_serial_device_exit(DeviceState *dev)
+static void virtio_serial_device_exit(VirtIODevice *vdev)
 {
-    VirtIOSerial *vser = VIRTIO_SERIAL(dev);
-    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+    VirtIOSerial *vser = VIRTIO_SERIAL(vdev);
 
-    unregister_savevm(dev, "virtio-console", vser);
+    unregister_savevm(DEVICE(vdev), "virtio-console", vser);
 
     g_free(vser->ivqs);
     g_free(vser->ovqs);
@@ -1004,7 +999,6 @@ static int virtio_serial_device_exit(DeviceState *dev)
         g_free(vser->post_load);
     }
     virtio_cleanup(vdev);
-    return 0;
 }
 
 static Property virtio_serial_properties[] = {
@@ -1016,10 +1010,10 @@ static void virtio_serial_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = virtio_serial_device_exit;
     dc->props = virtio_serial_properties;
     set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
     vdc->init = virtio_serial_device_init;
+    vdc->exit = virtio_serial_device_exit;
     vdc->get_features = get_features;
     vdc->get_config = get_config;
     vdc->set_config = set_config;
@@ -102,7 +102,7 @@ qemu_irq qemu_irq_invert(qemu_irq irq)
 {
     /* The default state for IRQs is low, so raise the output now. */
     qemu_irq_raise(irq);
-    return qemu_allocate_irqs(qemu_notirq, irq, 1)[0];
+    return qemu_allocate_irq(qemu_notirq, irq, 0);
 }
 
 static void qemu_splitirq(void *opaque, int line, int level)
@@ -117,7 +117,7 @@ qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2)
     qemu_irq *s = g_malloc0(2 * sizeof(qemu_irq));
     s[0] = irq1;
     s[1] = irq2;
-    return qemu_allocate_irqs(qemu_splitirq, s, 1)[0];
+    return qemu_allocate_irq(qemu_splitirq, s, 0);
 }
 
 static void proxy_irq_handler(void *opaque, int n, int level)
@@ -312,18 +312,42 @@ static int ssd0323_load(QEMUFile *f, void *opaque, int version_id)
         return -EINVAL;
 
     s->cmd_len = qemu_get_be32(f);
+    if (s->cmd_len < 0 || s->cmd_len > ARRAY_SIZE(s->cmd_data)) {
+        return -EINVAL;
+    }
     s->cmd = qemu_get_be32(f);
     for (i = 0; i < 8; i++)
         s->cmd_data[i] = qemu_get_be32(f);
     s->row = qemu_get_be32(f);
+    if (s->row < 0 || s->row >= 80) {
+        return -EINVAL;
+    }
     s->row_start = qemu_get_be32(f);
+    if (s->row_start < 0 || s->row_start >= 80) {
+        return -EINVAL;
+    }
     s->row_end = qemu_get_be32(f);
+    if (s->row_end < 0 || s->row_end >= 80) {
+        return -EINVAL;
+    }
     s->col = qemu_get_be32(f);
+    if (s->col < 0 || s->col >= 64) {
+        return -EINVAL;
+    }
     s->col_start = qemu_get_be32(f);
+    if (s->col_start < 0 || s->col_start >= 64) {
+        return -EINVAL;
+    }
     s->col_end = qemu_get_be32(f);
+    if (s->col_end < 0 || s->col_end >= 64) {
+        return -EINVAL;
+    }
     s->redraw = qemu_get_be32(f);
     s->remap = qemu_get_be32(f);
     s->mode = qemu_get_be32(f);
+    if (s->mode != SSD0323_CMD && s->mode != SSD0323_DATA) {
+        return -EINVAL;
+    }
     qemu_get_buffer(f, s->framebuffer, sizeof(s->framebuffer));
 
     ss->cs = qemu_get_be32(f);
@@ -1660,7 +1660,7 @@ struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
     }
 
     omap_dma_setcaps(s);
-    omap_clk_adduser(s->clk, qemu_allocate_irqs(omap_dma_clk_update, s, 1)[0]);
+    omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
     omap_dma_reset(s->dma);
     omap_dma_clk_update(s, 0, 1);
@@ -2082,7 +2082,7 @@ struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
     s->intr_update = omap_dma_interrupts_4_update;
 
     omap_dma_setcaps(s);
-    omap_clk_adduser(s->clk, qemu_allocate_irqs(omap_dma_clk_update, s, 1)[0]);
+    omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
     omap_dma_reset(s->dma);
     omap_dma_clk_update(s, 0, !!s->dma->freq);
@@ -203,6 +203,15 @@ static bool is_version_0 (void *opaque, int version_id)
     return version_id == 0;
 }
 
+static bool vmstate_scoop_validate(void *opaque, int version_id)
+{
+    ScoopInfo *s = opaque;
+
+    return !(s->prev_level & 0xffff0000) &&
+        !(s->gpio_level & 0xffff0000) &&
+        !(s->gpio_dir & 0xffff0000);
+}
+
 static const VMStateDescription vmstate_scoop_regs = {
     .name = "scoop",
     .version_id = 1,
@@ -215,6 +224,7 @@ static const VMStateDescription vmstate_scoop_regs = {
         VMSTATE_UINT32(gpio_level, ScoopInfo),
         VMSTATE_UINT32(gpio_dir, ScoopInfo),
         VMSTATE_UINT32(prev_level, ScoopInfo),
+        VMSTATE_VALIDATE("irq levels are 16 bit", vmstate_scoop_validate),
        VMSTATE_UINT16(mcr, ScoopInfo),
        VMSTATE_UINT16(cdr, ScoopInfo),
        VMSTATE_UINT16(ccr, ScoopInfo),
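VMSTATE_VALIDATE, introduced into the scoop register list above, wires a predicate into the vmstate field table; when the predicate returns false during an incoming migration, the load fails instead of accepting impossible register values. A rough standalone model of that flow (the struct layout matches the hunk, the surrounding machinery is simplified):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct scoop_state {
        uint32_t gpio_level;
        uint32_t gpio_dir;
        uint32_t prev_level;
    };

    /* Predicate in the style of vmstate_scoop_validate(): the GPIO
     * registers are 16 bits wide, so set high bits mean the snapshot
     * is corrupt or hostile. */
    static bool scoop_levels_are_16bit(const struct scoop_state *s)
    {
        return !(s->prev_level & 0xffff0000) &&
               !(s->gpio_level & 0xffff0000) &&
               !(s->gpio_dir & 0xffff0000);
    }

    /* Run the validator after the raw fields have been read and fail
     * the whole load if it does not hold (-1 stands in for -EINVAL). */
    static int scoop_post_load(const struct scoop_state *s)
    {
        return scoop_levels_are_16bit(s) ? 0 : -1;
    }

    int main(void)
    {
        struct scoop_state good = { 0x1234, 0x00ff, 0x0001 };
        struct scoop_state bad  = { 0x1234, 0x00ff, 0xdead0001 };
        printf("%d %d\n", scoop_post_load(&good), scoop_post_load(&bad));
        return 0;
    }

The pcie_aer hunk further down uses the same macro to enforce log_num <= log_max.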
@@ -1075,15 +1075,16 @@ void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
     /* ACPI tables pointed to by RSDT */
+    acpi_add_table(table_offsets, tables->table_data);
     build_fadt(tables->table_data, tables->linker, &pm, facs, dsdt);
-    acpi_add_table(table_offsets, tables->table_data);
 
+    acpi_add_table(table_offsets, tables->table_data);
     build_ssdt(tables->table_data, tables->linker, &cpu, &pm, &misc, &pci,
                guest_info);
-    acpi_add_table(table_offsets, tables->table_data);
 
-    build_madt(tables->table_data, tables->linker, &cpu, guest_info);
     acpi_add_table(table_offsets, tables->table_data);
+    build_madt(tables->table_data, tables->linker, &cpu, guest_info);
+
     if (misc.has_hpet) {
         acpi_add_table(table_offsets, tables->table_data);
         build_hpet(tables->table_data, tables->linker);
     }
     if (guest_info->numa_nodes) {
@@ -18,11 +18,10 @@
  * with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "qemu-common.h"
 #include "bios-linker-loader.h"
 #include "hw/nvram/fw_cfg.h"
 
-#include <string.h>
-#include <assert.h>
 #include "qemu/bswap.h"
 
 #define BIOS_LINKER_LOADER_FILESZ FW_CFG_MAX_FILE_PATH
@@ -1257,6 +1257,7 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
     if (pos != 0 && kvm_device_msix_supported(kvm_state)) {
         int bar_nr;
         uint32_t msix_table_entry;
+        uint16_t msix_max;
 
         if (!check_irqchip_in_kernel()) {
             return -ENOTSUP;
@@ -1268,9 +1269,10 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
         }
         pci_dev->msix_cap = pos;
 
-        pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS,
-                     pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) &
-                     PCI_MSIX_FLAGS_QSIZE);
+        msix_max = (pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) &
+                    PCI_MSIX_FLAGS_QSIZE) + 1;
+        msix_max = MIN(msix_max, KVM_MAX_MSIX_PER_DEV);
+        pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS, msix_max - 1);
 
         /* Only enable and function mask bits are writable */
         pci_set_word(pci_dev->wmask + pos + PCI_MSIX_FLAGS,
@@ -1280,9 +1282,7 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
         bar_nr = msix_table_entry & PCI_MSIX_FLAGS_BIRMASK;
         msix_table_entry &= ~PCI_MSIX_FLAGS_BIRMASK;
         dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
-        dev->msix_max = pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS);
-        dev->msix_max &= PCI_MSIX_FLAGS_QSIZE;
-        dev->msix_max += 1;
+        dev->msix_max = msix_max;
     }
 
     /* Minimal PM support, nothing writable, device appears to NAK changes */
20  hw/i386/pc.c
@@ -1093,21 +1093,13 @@ PcGuestInfo *pc_guest_info_init(ram_addr_t below_4g_mem_size,
     return guest_info;
 }
 
-void pc_init_pci64_hole(PcPciInfo *pci_info, uint64_t pci_hole64_start,
-                        uint64_t pci_hole64_size)
+/* setup pci memory address space mapping into system address space */
+void pc_pci_as_mapping_init(Object *owner, MemoryRegion *system_memory,
+                            MemoryRegion *pci_address_space)
 {
-    if ((sizeof(hwaddr) == 4) || (!pci_hole64_size)) {
-        return;
-    }
-    /*
-     * BIOS does not set MTRR entries for the 64 bit window, so no need to
-     * align address to power of two. Align address at 1G, this makes sure
-     * it can be exactly covered with a PAT entry even when using huge
-     * pages.
-     */
-    pci_info->w64.begin = ROUND_UP(pci_hole64_start, 0x1ULL << 30);
-    pci_info->w64.end = pci_info->w64.begin + pci_hole64_size;
-    assert(pci_info->w64.begin <= pci_info->w64.end);
+    /* Set to lower priority than RAM */
+    memory_region_add_subregion_overlap(system_memory, 0x0,
+                                        pci_address_space, -1);
 }
 
 void pc_acpi_init(const char *default_dsdt)
@@ -150,7 +150,6 @@ static void pc_init1(QEMUMachineInitArgs *args,
         pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, &isa_bus, gsi,
                               system_memory, system_io, args->ram_size,
                               below_4g_mem_size,
-                              0x100000000ULL - below_4g_mem_size,
                               above_4g_mem_size,
                               pci_memory, ram_memory);
     } else {
@@ -1290,7 +1290,7 @@ const VMStateDescription vmstate_ahci = {
         VMSTATE_UINT32(control_regs.impl, AHCIState),
         VMSTATE_UINT32(control_regs.version, AHCIState),
         VMSTATE_UINT32(idp_index, AHCIState),
-        VMSTATE_INT32(ports, AHCIState),
+        VMSTATE_INT32_EQUAL(ports, AHCIState),
         VMSTATE_END_OF_LIST()
     },
 };
@@ -1619,7 +1619,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd)
         case 2: /* extended self test */
             s->smart_selftest_count++;
             if (s->smart_selftest_count > 21) {
-                s->smart_selftest_count = 0;
+                s->smart_selftest_count = 1;
             }
             n = 2 + (s->smart_selftest_count - 1) * 24;
             s->smart_selftest_data[n] = s->sector;
@@ -594,7 +594,7 @@ static void microdrive_realize(DeviceState *dev, Error **errp)
 {
     MicroDriveState *md = MICRODRIVE(dev);
 
-    ide_init2(&md->bus, qemu_allocate_irqs(md_set_irq, md, 1)[0]);
+    ide_init2(&md->bus, qemu_allocate_irq(md_set_irq, md, 0));
 }
 
 static void microdrive_init(Object *obj)
@@ -1070,9 +1070,21 @@ static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
     s->enabled = qemu_get_byte(f);
     s->host_mode = qemu_get_byte(f);
     s->function = qemu_get_byte(f);
+    if (s->function < 0 || s->function >= ARRAY_SIZE(mode_regs)) {
+        return -EINVAL;
+    }
     s->nextfunction = qemu_get_byte(f);
+    if (s->nextfunction < 0 || s->nextfunction >= ARRAY_SIZE(mode_regs)) {
+        return -EINVAL;
+    }
     s->precision = qemu_get_byte(f);
+    if (s->precision < 0 || s->precision >= ARRAY_SIZE(resolution)) {
+        return -EINVAL;
+    }
     s->nextprecision = qemu_get_byte(f);
+    if (s->nextprecision < 0 || s->nextprecision >= ARRAY_SIZE(resolution)) {
+        return -EINVAL;
+    }
     s->filter = qemu_get_byte(f);
     s->pin_func = qemu_get_byte(f);
     s->ref = qemu_get_byte(f);
@@ -418,7 +418,7 @@ static int exynos4210_combiner_init(SysBusDevice *sbd)
     qdev_init_gpio_in(dev, exynos4210_combiner_handler, IIC_NIRQ);
 
     /* Connect SysBusDev irqs to device specific irqs */
-    for (i = 0; i < IIC_NIRQ; i++) {
+    for (i = 0; i < IIC_NGRP; i++) {
         sysbus_init_irq(sbd, &s->output_irq[i]);
     }
@@ -41,7 +41,7 @@
 #define GIC_SET_MODEL(irq) s->irq_state[irq].model = true
 #define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = false
 #define GIC_TEST_MODEL(irq) s->irq_state[irq].model
-#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
+#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level |= (cm)
 #define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
 #define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
 #define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = true
@@ -41,6 +41,7 @@
 #include "hw/sysbus.h"
 #include "hw/pci/msi.h"
 #include "qemu/bitops.h"
+#include "qapi/qmp/qerror.h"
 
 //#define DEBUG_OPENPIC
 
@@ -1416,7 +1417,7 @@ static void openpic_load_IRQ_queue(QEMUFile* f, IRQQueue *q)
 static int openpic_load(QEMUFile* f, void *opaque, int version_id)
 {
     OpenPICState *opp = (OpenPICState *)opaque;
-    unsigned int i;
+    unsigned int i, nb_cpus;
 
     if (version_id != 1) {
         return -EINVAL;
@@ -1428,7 +1429,11 @@ static int openpic_load(QEMUFile* f, void *opaque, int version_id)
     qemu_get_be32s(f, &opp->spve);
     qemu_get_be32s(f, &opp->tfrr);
 
-    qemu_get_be32s(f, &opp->nb_cpus);
+    qemu_get_be32s(f, &nb_cpus);
+    if (opp->nb_cpus != nb_cpus) {
+        return -EINVAL;
+    }
+    assert(nb_cpus > 0 && nb_cpus <= MAX_CPU);
 
     for (i = 0; i < opp->nb_cpus; i++) {
         qemu_get_sbe32s(f, &opp->dst[i].ctpr);
@@ -1567,6 +1572,13 @@ static void openpic_realize(DeviceState *dev, Error **errp)
         {NULL}
     };
 
+    if (opp->nb_cpus > MAX_CPU) {
+        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
+                  TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
+                  (uint64_t)0, (uint64_t)MAX_CPU);
+        return;
+    }
+
     switch (opp->model) {
     case OPENPIC_MODEL_FSL_MPIC_20:
     default:
@@ -276,7 +276,7 @@ static bool vexpress_cfgctrl_read(arm_sysctl_state *s, unsigned int dcc,
         }
         break;
     case SYS_CFG_OSC:
-        if (site == SYS_CFG_SITE_MB && device < sizeof(s->mb_clock)) {
+        if (site == SYS_CFG_SITE_MB && device < ARRAY_SIZE(s->mb_clock)) {
             /* motherboard clock */
             *val = s->mb_clock[device];
             return true;
@@ -324,7 +324,7 @@ static bool vexpress_cfgctrl_write(arm_sysctl_state *s, unsigned int dcc,
 
     switch (function) {
     case SYS_CFG_OSC:
-        if (site == SYS_CFG_SITE_MB && device < sizeof(s->mb_clock)) {
+        if (site == SYS_CFG_SITE_MB && device < ARRAY_SIZE(s->mb_clock)) {
             /* motherboard clock */
             s->mb_clock[device] = val;
             return true;
@@ -135,9 +135,9 @@ CBus *cbus_init(qemu_irq dat)
     CBusPriv *s = (CBusPriv *) g_malloc0(sizeof(*s));
 
     s->dat_out = dat;
-    s->cbus.clk = qemu_allocate_irqs(cbus_clk, s, 1)[0];
-    s->cbus.dat = qemu_allocate_irqs(cbus_dat, s, 1)[0];
-    s->cbus.sel = qemu_allocate_irqs(cbus_sel, s, 1)[0];
+    s->cbus.clk = qemu_allocate_irq(cbus_clk, s, 0);
+    s->cbus.dat = qemu_allocate_irq(cbus_dat, s, 0);
+    s->cbus.sel = qemu_allocate_irq(cbus_sel, s, 0);
 
     s->sel = 1;
     s->clk = 0;
@@ -878,8 +878,20 @@ static void vfio_disable_msi_common(VFIODevice *vdev)
 
 static void vfio_disable_msix(VFIODevice *vdev)
 {
+    int i;
+
     msix_unset_vector_notifiers(&vdev->pdev);
 
+    /*
+     * MSI-X will only release vectors if MSI-X is still enabled on the
+     * device, check through the rest and release it ourselves if necessary.
+     */
+    for (i = 0; i < vdev->nr_vectors; i++) {
+        if (vdev->msi_vectors[i].use) {
+            vfio_msix_vector_release(&vdev->pdev, i);
+        }
+    }
+
     if (vdev->nr_vectors) {
         vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
     }
@@ -8,6 +8,7 @@
  */
 #include "hw/sysbus.h"
 #include "net/net.h"
+#include "migration/migration.h"
 #include <zlib.h>
 
 //#define DEBUG_STELLARIS_ENET 1
@@ -75,6 +76,7 @@ typedef struct {
     NICConf conf;
     qemu_irq irq;
     MemoryRegion mmio;
+    Error *migration_blocker;
 } stellaris_enet_state;
 
 static void stellaris_enet_update(stellaris_enet_state *s)
@@ -252,17 +254,19 @@ static void stellaris_enet_write(void *opaque, hwaddr offset,
                 s->tx_fifo[s->tx_fifo_len++] = value >> 24;
             }
         } else {
-            s->tx_fifo[s->tx_fifo_len++] = value;
-            s->tx_fifo[s->tx_fifo_len++] = value >> 8;
-            s->tx_fifo[s->tx_fifo_len++] = value >> 16;
-            s->tx_fifo[s->tx_fifo_len++] = value >> 24;
+            if (s->tx_fifo_len + 4 <= ARRAY_SIZE(s->tx_fifo)) {
+                s->tx_fifo[s->tx_fifo_len++] = value;
+                s->tx_fifo[s->tx_fifo_len++] = value >> 8;
+                s->tx_fifo[s->tx_fifo_len++] = value >> 16;
+                s->tx_fifo[s->tx_fifo_len++] = value >> 24;
+            }
             if (s->tx_fifo_len >= s->tx_frame_len) {
                 /* We don't implement explicit CRC, so just chop it off. */
                 if ((s->tctl & SE_TCTL_CRC) == 0)
                     s->tx_frame_len -= 4;
                 if ((s->tctl & SE_TCTL_PADEN) && s->tx_frame_len < 60) {
                     memset(&s->tx_fifo[s->tx_frame_len], 0, 60 - s->tx_frame_len);
                     s->tx_fifo_len = 60;
                     s->tx_frame_len = 60;
                 }
                 qemu_send_packet(qemu_get_queue(s->nic), s->tx_fifo,
                                  s->tx_frame_len);
@@ -359,7 +363,7 @@ static int stellaris_enet_load(QEMUFile *f, void *opaque, int version_id)
     stellaris_enet_state *s = (stellaris_enet_state *)opaque;
     int i;
 
-    if (version_id != 1)
+    if (1)
         return -EINVAL;
 
     s->ris = qemu_get_be32(f);
@@ -420,6 +424,10 @@ static int stellaris_enet_init(SysBusDevice *sbd)
     stellaris_enet_reset(s);
     register_savevm(dev, "stellaris_enet", -1, 1,
                     stellaris_enet_save, stellaris_enet_load, s);
+
+    error_setg(&s->migration_blocker,
+               "stellaris_enet does not support migration");
+    migrate_add_blocker(s->migration_blocker);
     return 0;
 }
 
@@ -427,6 +435,9 @@ static void stellaris_enet_unrealize(DeviceState *dev, Error **errp)
 {
     stellaris_enet_state *s = STELLARIS_ENET(dev);
 
+    migrate_del_blocker(s->migration_blocker);
+    error_free(s->migration_blocker);
+
     unregister_savevm(DEVICE(s), "stellaris_enet", s);
 
     memory_region_destroy(&s->mmio);
@@ -515,6 +515,12 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
         }
         vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
     }
+
+    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
+        memset(n->vlans, 0, MAX_VLAN >> 3);
+    } else {
+        memset(n->vlans, 0xff, MAX_VLAN >> 3);
+    }
 }
 
 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
@@ -837,6 +843,14 @@ static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
     return 1;
 }
 
+static void virtio_net_hdr_swap(struct virtio_net_hdr *hdr)
+{
+    tswap16s(&hdr->hdr_len);
+    tswap16s(&hdr->gso_size);
+    tswap16s(&hdr->csum_start);
+    tswap16s(&hdr->csum_offset);
+}
+
 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
  * it never finds out that the packets don't have valid checksums. This
  * causes dhclient to get upset. Fedora's carried a patch for ages to
@@ -872,6 +886,7 @@ static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
         void *wbuf = (void *)buf;
         work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                     size - n->host_hdr_len);
+        virtio_net_hdr_swap(wbuf);
         iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
     } else {
         struct virtio_net_hdr hdr = {
@@ -1080,6 +1095,14 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
             exit(1);
         }
 
+        if (n->has_vnet_hdr) {
+            if (out_sg[0].iov_len < n->guest_hdr_len) {
+                error_report("virtio-net header incorrect");
+                exit(1);
+            }
+            virtio_net_hdr_swap((void *) out_sg[0].iov_base);
+        }
+
         /*
          * If host wants to see the guest header as is, we can
          * pass it on unchanged. Otherwise, copy just the parts
@@ -1336,10 +1359,17 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
         if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
             qemu_get_buffer(f, n->mac_table.macs,
                             n->mac_table.in_use * ETH_ALEN);
-        } else if (n->mac_table.in_use) {
-            uint8_t *buf = g_malloc0(n->mac_table.in_use);
-            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
-            g_free(buf);
+        } else {
+            int64_t i;
+
+            /* Overflow detected - can happen if source has a larger MAC table.
+             * We simply set overflow flag so there's no need to maintain the
+             * table of addresses, discard them all.
+             * Note: 64 bit math to avoid integer overflow.
+             */
+            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
+                qemu_get_byte(f);
+            }
             n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
             n->mac_table.in_use = 0;
         }
@@ -1381,6 +1411,11 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
     }
 
     n->curr_queues = qemu_get_be16(f);
+    if (n->curr_queues > n->max_queues) {
+        error_report("virtio-net: curr_queues %x > max_queues %x",
+                     n->curr_queues, n->max_queues);
+        return -1;
+    }
     for (i = 1; i < n->curr_queues; i++) {
         n->vqs[i].tx_waiting = qemu_get_be32(f);
     }
@@ -1570,16 +1605,15 @@ static int virtio_net_device_init(VirtIODevice *vdev)
     return 0;
 }
 
-static int virtio_net_device_exit(DeviceState *qdev)
+static void virtio_net_device_exit(VirtIODevice *vdev)
 {
-    VirtIONet *n = VIRTIO_NET(qdev);
-    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+    VirtIONet *n = VIRTIO_NET(vdev);
     int i;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
 
-    unregister_savevm(qdev, "virtio-net", n);
+    unregister_savevm(DEVICE(vdev), "virtio-net", n);
 
     if (n->netclient_name) {
         g_free(n->netclient_name);
@@ -1610,8 +1644,6 @@ static int virtio_net_device_exit(DeviceState *qdev)
     g_free(n->vqs);
     qemu_del_nic(n->nic);
     virtio_cleanup(vdev);
-
-    return 0;
 }
 
 static void virtio_net_instance_init(Object *obj)
@@ -1638,10 +1670,10 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = virtio_net_device_exit;
     dc->props = virtio_net_properties;
     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
     vdc->init = virtio_net_device_init;
+    vdc->exit = virtio_net_device_exit;
     vdc->get_config = virtio_net_get_config;
     vdc->set_config = virtio_net_set_config;
     vdc->get_features = virtio_net_get_features;
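One detail worth calling out in the virtio_net_load() hunk above: the discard loop computes (int64_t)n->mac_table.in_use * ETH_ALEN so that a hostile in_use value from the wire cannot overflow a 32-bit product and shrink the loop bound. A tiny demonstration of the failure mode being avoided (values chosen for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    int main(void)
    {
        uint32_t in_use = 0x40000000;   /* hostile value from the stream */

        /* A 32-bit product wraps around... */
        uint32_t bad = in_use * ETH_ALEN;

        /* ...while 64-bit math keeps the true byte count, as in the
         * patched loop: for (i = 0; i < (int64_t)in_use * ETH_ALEN; ++i) */
        int64_t good = (int64_t)in_use * ETH_ALEN;

        printf("32-bit: %u\n64-bit: %lld\n", bad, (long long)good);
        return 0;
    }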
@@ -52,6 +52,9 @@
 #define VMXNET3_DEVICE_VERSION 0x1
 #define VMXNET3_DEVICE_REVISION 0x1
 
+/* Number of interrupt vectors for non-MSIx modes */
+#define VMXNET3_MAX_NMSIX_INTRS (1)
+
 /* Macros for rings descriptors access */
 #define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
     (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
@@ -1305,6 +1308,51 @@ static bool vmxnet3_verify_intx(VMXNET3State *s, int intx)
            (pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1));
 }
 
+static void vmxnet3_validate_interrupt_idx(bool is_msix, int idx)
+{
+    int max_ints = is_msix ? VMXNET3_MAX_INTRS : VMXNET3_MAX_NMSIX_INTRS;
+    if (idx >= max_ints) {
+        hw_error("Bad interrupt index: %d\n", idx);
+    }
+}
+
+static void vmxnet3_validate_interrupts(VMXNET3State *s)
+{
+    int i;
+
+    VMW_CFPRN("Verifying event interrupt index (%d)", s->event_int_idx);
+    vmxnet3_validate_interrupt_idx(s->msix_used, s->event_int_idx);
+
+    for (i = 0; i < s->txq_num; i++) {
+        int idx = s->txq_descr[i].intr_idx;
+        VMW_CFPRN("Verifying TX queue %d interrupt index (%d)", i, idx);
+        vmxnet3_validate_interrupt_idx(s->msix_used, idx);
+    }
+
+    for (i = 0; i < s->rxq_num; i++) {
+        int idx = s->rxq_descr[i].intr_idx;
+        VMW_CFPRN("Verifying RX queue %d interrupt index (%d)", i, idx);
+        vmxnet3_validate_interrupt_idx(s->msix_used, idx);
+    }
+}
+
+static void vmxnet3_validate_queues(VMXNET3State *s)
+{
+    /*
+     * txq_num and rxq_num are total number of queues
+     * configured by guest. These numbers must not
+     * exceed corresponding maximal values.
+     */
+
+    if (s->txq_num > VMXNET3_DEVICE_MAX_TX_QUEUES) {
+        hw_error("Bad TX queues number: %d\n", s->txq_num);
+    }
+
+    if (s->rxq_num > VMXNET3_DEVICE_MAX_RX_QUEUES) {
+        hw_error("Bad RX queues number: %d\n", s->rxq_num);
+    }
+}
+
 static void vmxnet3_activate_device(VMXNET3State *s)
 {
     int i;
@@ -1351,7 +1399,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
         VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
 
     VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
-    assert(s->txq_num <= VMXNET3_DEVICE_MAX_TX_QUEUES);
+    vmxnet3_validate_queues(s);
 
     qdescr_table_pa =
         VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
@@ -1447,6 +1495,8 @@ static void vmxnet3_activate_device(VMXNET3State *s)
                sizeof(s->rxq_descr[i].rxq_stats));
     }
 
+    vmxnet3_validate_interrupts(s);
+
     /* Make sure everything is in place before device activation */
     smp_wmb();
@@ -2007,7 +2057,6 @@ vmxnet3_cleanup_msix(VMXNET3State *s)
     }
 }
 
-#define VMXNET3_MSI_NUM_VECTORS (1)
 #define VMXNET3_MSI_OFFSET (0x50)
 #define VMXNET3_USE_64BIT (true)
 #define VMXNET3_PER_VECTOR_MASK (false)
@@ -2018,7 +2067,7 @@ vmxnet3_init_msi(VMXNET3State *s)
     PCIDevice *d = PCI_DEVICE(s);
     int res;
 
-    res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MSI_NUM_VECTORS,
+    res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MAX_NMSIX_INTRS,
                    VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK);
     if (0 > res) {
         VMW_WRPRN("Failed to initialize MSI, error %d", res);
@@ -2344,6 +2393,9 @@ static int vmxnet3_post_load(void *opaque, int version_id)
         }
     }
 
+    vmxnet3_validate_queues(s);
+    vmxnet3_validate_interrupts(s);
+
     return 0;
 }
@@ -103,8 +103,6 @@ struct PCII440FXState {
     MemoryRegion *system_memory;
     MemoryRegion *pci_address_space;
     MemoryRegion *ram_memory;
-    MemoryRegion pci_hole;
-    MemoryRegion pci_hole_64bit;
     PAMMemoryRegion pam_regions[13];
     MemoryRegion smram_region;
     uint8_t smm_enabled;
@@ -313,8 +311,7 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
                     MemoryRegion *address_space_mem,
                     MemoryRegion *address_space_io,
                     ram_addr_t ram_size,
-                    hwaddr pci_hole_start,
-                    hwaddr pci_hole_size,
+                    ram_addr_t below_4g_mem_size,
                     ram_addr_t above_4g_mem_size,
                     MemoryRegion *pci_address_space,
                     MemoryRegion *ram_memory)
@@ -327,7 +324,6 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
     PCII440FXState *f;
     unsigned i;
     I440FXState *i440fx;
-    uint64_t pci_hole64_size;
 
     dev = qdev_create(NULL, TYPE_I440FX_PCI_HOST_BRIDGE);
     s = PCI_HOST_BRIDGE(dev);
@@ -345,33 +341,12 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state,
     f->ram_memory = ram_memory;
 
     i440fx = I440FX_PCI_HOST_BRIDGE(dev);
-    /* Set PCI window size the way seabios has always done it. */
-    /* Power of 2 so bios can cover it with a single MTRR */
-    if (ram_size <= 0x80000000) {
-        i440fx->pci_info.w32.begin = 0x80000000;
-    } else if (ram_size <= 0xc0000000) {
-        i440fx->pci_info.w32.begin = 0xc0000000;
-    } else {
-        i440fx->pci_info.w32.begin = 0xe0000000;
-    }
+    i440fx->pci_info.w32.begin = below_4g_mem_size;
 
-    memory_region_init_alias(&f->pci_hole, OBJECT(d), "pci-hole", f->pci_address_space,
-                             pci_hole_start, pci_hole_size);
-    memory_region_add_subregion(f->system_memory, pci_hole_start, &f->pci_hole);
+    /* setup pci memory mapping */
+    pc_pci_as_mapping_init(OBJECT(f), f->system_memory,
+                           f->pci_address_space);
 
-    pci_hole64_size = pci_host_get_hole64_size(i440fx->pci_hole64_size);
-
-    pc_init_pci64_hole(&i440fx->pci_info, 0x100000000ULL + above_4g_mem_size,
-                       pci_hole64_size);
-    memory_region_init_alias(&f->pci_hole_64bit, OBJECT(d), "pci-hole64",
-                             f->pci_address_space,
-                             i440fx->pci_info.w64.begin,
-                             pci_hole64_size);
-    if (pci_hole64_size) {
-        memory_region_add_subregion(f->system_memory,
-                                    i440fx->pci_info.w64.begin,
-                                    &f->pci_hole_64bit);
-    }
     memory_region_init_alias(&f->smram_region, OBJECT(d), "smram-region",
                              f->pci_address_space, 0xa0000, 0x20000);
     memory_region_add_subregion_overlap(f->system_memory, 0xa0000,
@@ -356,28 +356,11 @@ static int mch_init(PCIDevice *d)
 {
     int i;
     MCHPCIState *mch = MCH_PCI_DEVICE(d);
-    uint64_t pci_hole64_size;
 
-    /* setup pci memory regions */
-    memory_region_init_alias(&mch->pci_hole, OBJECT(mch), "pci-hole",
-                             mch->pci_address_space,
-                             mch->below_4g_mem_size,
-                             0x100000000ULL - mch->below_4g_mem_size);
-    memory_region_add_subregion(mch->system_memory, mch->below_4g_mem_size,
-                                &mch->pci_hole);
+    /* setup pci memory mapping */
+    pc_pci_as_mapping_init(OBJECT(mch), mch->system_memory,
+                           mch->pci_address_space);
 
-    pci_hole64_size = pci_host_get_hole64_size(mch->pci_hole64_size);
-    pc_init_pci64_hole(&mch->pci_info, 0x100000000ULL + mch->above_4g_mem_size,
-                       pci_hole64_size);
-    memory_region_init_alias(&mch->pci_hole_64bit, OBJECT(mch), "pci-hole64",
-                             mch->pci_address_space,
-                             mch->pci_info.w64.begin,
-                             pci_hole64_size);
-    if (pci_hole64_size) {
-        memory_region_add_subregion(mch->system_memory,
-                                    mch->pci_info.w64.begin,
-                                    &mch->pci_hole_64bit);
-    }
     /* smram */
     cpu_smm_register(&mch_set_smm, mch);
     memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
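Both host bridges (i440fx above, mch here) now delegate to pc_pci_as_mapping_init(), which maps the whole PCI address space at offset 0 with priority -1; RAM regions added at the default priority 0 then shadow it wherever they overlap, so the explicitly computed 32-bit and 64-bit "PCI hole" aliases can be dropped. A toy model of that priority-based overlap resolution (simplified; QEMU's memory API resolves a full region tree, not two flat regions):

    #include <stdio.h>

    struct region {
        const char *name;
        unsigned long long base, size;
        int priority;
    };

    /* Whichever region covers the address with the higher priority
     * wins, mimicking memory_region_add_subregion_overlap(). */
    static const struct region *resolve(const struct region *a,
                                        const struct region *b,
                                        unsigned long long addr)
    {
        const struct region *hit = NULL;
        if (addr >= a->base && addr - a->base < a->size) hit = a;
        if (addr >= b->base && addr - b->base < b->size &&
            (!hit || b->priority > hit->priority)) hit = b;
        return hit;
    }

    int main(void)
    {
        struct region pci = { "pci", 0, 1ULL << 40, -1 }; /* background */
        struct region ram = { "ram", 0, 1ULL << 31, 0 };  /* shadows PCI */

        printf("%s\n", resolve(&pci, &ram, 0x1000)->name);      /* ram */
        printf("%s\n", resolve(&pci, &ram, 0x90000000)->name);  /* pci */
        return 0;
    }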
@@ -474,7 +474,7 @@ const VMStateDescription vmstate_pci_device = {
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
     .fields = (VMStateField []) {
-        VMSTATE_INT32_LE(version_id, PCIDevice),
+        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
         VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
                                    vmstate_info_pci_config,
                                    PCI_CONFIG_SPACE_SIZE),
@@ -491,7 +491,7 @@ const VMStateDescription vmstate_pcie_device = {
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
     .fields = (VMStateField []) {
-        VMSTATE_INT32_LE(version_id, PCIDevice),
+        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
         VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
                                    vmstate_info_pci_config,
                                    PCIE_CONFIG_SPACE_SIZE),
@@ -820,6 +820,7 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
     }
 
     pci_dev->bus = bus;
+    pci_dev->devfn = devfn;
     dma_as = pci_device_iommu_address_space(pci_dev);
 
     memory_region_init_alias(&pci_dev->bus_master_enable_region,
@@ -829,7 +830,6 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
     address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region,
                        name);
 
-    pci_dev->devfn = devfn;
     pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
     pci_dev->irq_state = 0;
     pci_config_alloc(pci_dev);
@@ -795,6 +795,13 @@ static const VMStateDescription vmstate_pcie_aer_err = {
     }
 };
 
+static bool pcie_aer_state_log_num_valid(void *opaque, int version_id)
+{
+    PCIEAERLog *s = opaque;
+
+    return s->log_num <= s->log_max;
+}
+
 const VMStateDescription vmstate_pcie_aer_log = {
     .name = "PCIE_AER_ERROR_LOG",
     .version_id = 1,
@@ -802,7 +809,8 @@ const VMStateDescription vmstate_pcie_aer_log = {
     .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT16(log_num, PCIEAERLog),
-        VMSTATE_UINT16(log_max, PCIEAERLog),
+        VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog),
+        VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid),
         VMSTATE_STRUCT_VARRAY_POINTER_UINT16(log, PCIEAERLog, log_num,
                                              vmstate_pcie_aer_err, PCIEAERErr),
         VMSTATE_END_OF_LIST()
@@ -195,7 +195,7 @@ static void pxa2xx_pcmcia_initfn(Object *obj)
     memory_region_add_subregion(&s->container_mem, 0x0c000000,
                                 &s->common_iomem);
 
-    s->slot.irq = qemu_allocate_irqs(pxa2xx_pcmcia_set_irq, s, 1)[0];
+    s->slot.irq = qemu_allocate_irq(pxa2xx_pcmcia_set_irq, s, 0);
 
     object_property_add_link(obj, "card", TYPE_PCMCIA_CARD,
                              (Object **)&s->card, NULL);
@@ -342,6 +342,22 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 
     /* There is no cached config, allocate MSIs */
     if (!phb->msi_table[ndev].nvec) {
+        int max_irqs = 0;
+        if (ret_intr_type == RTAS_TYPE_MSI) {
+            max_irqs = msi_nr_vectors_allocated(pdev);
+        } else if (ret_intr_type == RTAS_TYPE_MSIX) {
+            max_irqs = pdev->msix_entries_nr;
+        }
+        if (!max_irqs) {
+            fprintf(stderr,
+                    "Requested interrupt type %d is not enabled for device#%d\n",
+                    ret_intr_type, ndev);
+            rtas_st(rets, 0, -1); /* Hardware error */
+            return;
+        }
+        if (req_num > max_irqs) {
+            req_num = max_irqs;
+        }
         irq = spapr_allocate_irq_block(req_num, false,
                                        ret_intr_type == RTAS_TYPE_MSI);
         if (irq < 0) {
@@ -722,9 +722,11 @@ out:
     return ret;
 }
 
-static void copy_irb_to_guest(IRB *dest, const IRB *src)
+static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw)
 {
     int i;
+    uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
+    uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
 
     copy_scsw_to_guest(&dest->scsw, &src->scsw);
 
@@ -734,8 +736,22 @@ static void copy_irb_to_guest(IRB *dest, const IRB *src)
     for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
         dest->ecw[i] = cpu_to_be32(src->ecw[i]);
     }
-    for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
-        dest->emw[i] = cpu_to_be32(src->emw[i]);
+    /* extended measurements enabled? */
+    if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
+        !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
+        !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
+        return;
+    }
+    /* extended measurements pending? */
+    if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
+        return;
+    }
+    if ((stctl & SCSW_STCTL_PRIMARY) ||
+        (stctl == SCSW_STCTL_SECONDARY) ||
+        ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
+        for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
+            dest->emw[i] = cpu_to_be32(src->emw[i]);
+        }
     }
 }
 
@@ -781,7 +797,7 @@ int css_do_tsch(SubchDev *sch, IRB *target_irb)
         }
     }
     /* Store the irb to the guest. */
-    copy_irb_to_guest(target_irb, &irb);
+    copy_irb_to_guest(target_irb, &irb, p);
 
     /* Clear conditions on subchannel, if applicable. */
     if (stctl & SCSW_STCTL_STATUS_PEND) {
@@ -26,11 +26,14 @@ void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn)
 
 int s390_virtio_hypercall(CPUS390XState *env)
 {
-    s390_virtio_fn fn = s390_diag500_table[env->regs[1]];
+    s390_virtio_fn fn;
 
-    if (!fn) {
-        return -EINVAL;
+    if (env->regs[1] < MAX_DIAG_SUBCODES) {
+        fn = s390_diag500_table[env->regs[1]];
+        if (fn) {
+            return fn(&env->regs[2]);
+        }
     }
 
-    return fn(&env->regs[2]);
+    return -EINVAL;
 }
@@ -57,9 +57,10 @@ static const TypeInfo virtual_css_bus_info = {
 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
 {
     VirtIODevice *vdev = NULL;
+    VirtioCcwDevice *dev = sch->driver_data;
 
-    if (sch->driver_data) {
-        vdev = ((VirtioCcwDevice *)sch->driver_data)->vdev;
+    if (dev) {
+        vdev = virtio_bus_get_device(&dev->bus);
     }
     return vdev;
 }
@@ -67,7 +68,8 @@ VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
 static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
                                               bool assign, bool set_handler)
 {
-    VirtQueue *vq = virtio_get_queue(dev->vdev, n);
+    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+    VirtQueue *vq = virtio_get_queue(vdev, n);
     EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
     int r = 0;
     SubchDev *sch = dev->sch;
@@ -97,6 +99,7 @@ static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
 
 static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
 {
+    VirtIODevice *vdev;
     int n, r;
 
     if (!(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) ||
@@ -104,8 +107,9 @@ static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
         dev->ioeventfd_started) {
         return;
     }
+    vdev = virtio_bus_get_device(&dev->bus);
     for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(dev->vdev, n)) {
+        if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
         r = virtio_ccw_set_guest2host_notifier(dev, n, true, true);
@@ -118,7 +122,7 @@ static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
 
 assign_error:
     while (--n >= 0) {
-        if (!virtio_queue_get_num(dev->vdev, n)) {
+        if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
         r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
@@ -132,13 +136,15 @@ static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
 
 static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
 {
+    VirtIODevice *vdev;
     int n, r;
 
     if (!dev->ioeventfd_started) {
         return;
     }
+    vdev = virtio_bus_get_device(&dev->bus);
     for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(dev->vdev, n)) {
+        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
@@ -189,7 +195,7 @@ typedef struct VirtioFeatDesc {
 static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
                               uint16_t index, uint16_t num)
 {
-    VirtioCcwDevice *dev = sch->driver_data;
+    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
 
     if (index > VIRTIO_PCI_QUEUE_MAX) {
         return -EINVAL;
@@ -200,23 +206,23 @@ static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
         return -EINVAL;
     }
 
-    if (!dev) {
+    if (!vdev) {
         return -EINVAL;
     }
 
-    virtio_queue_set_addr(dev->vdev, index, addr);
+    virtio_queue_set_addr(vdev, index, addr);
     if (!addr) {
-        virtio_queue_set_vector(dev->vdev, index, 0);
+        virtio_queue_set_vector(vdev, index, 0);
     } else {
         /* Fail if we don't have a big enough queue. */
         /* TODO: Add interface to handle vring.num changing */
-        if (virtio_queue_get_num(dev->vdev, index) > num) {
+        if (virtio_queue_get_num(vdev, index) > num) {
             return -EINVAL;
         }
-        virtio_queue_set_vector(dev->vdev, index, index);
+        virtio_queue_set_vector(vdev, index, index);
     }
     /* tell notify handler in case of config change */
-    dev->vdev->config_vector = VIRTIO_PCI_QUEUE_MAX;
+    vdev->config_vector = VIRTIO_PCI_QUEUE_MAX;
     return 0;
 }
 
@@ -230,6 +236,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
     hwaddr indicators;
     VqConfigBlock vq_config;
     VirtioCcwDevice *dev = sch->driver_data;
+    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
     bool check_len;
     int len;
     hwaddr hw_len;
@@ -272,7 +279,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         break;
     case CCW_CMD_VDEV_RESET:
         virtio_ccw_stop_ioeventfd(dev);
-        virtio_reset(dev->vdev);
+        virtio_reset(vdev);
         ret = 0;
         break;
     case CCW_CMD_READ_FEAT:
@@ -319,7 +326,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             features.features = ldl_le_phys(ccw.cda);
             if (features.index < ARRAY_SIZE(dev->host_features)) {
                 virtio_bus_set_vdev_features(&dev->bus, features.features);
-                dev->vdev->guest_features = features.features;
+                vdev->guest_features = features.features;
             } else {
                 /*
                  * If the guest supports more feature bits, assert that it
@@ -337,30 +344,30 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         break;
     case CCW_CMD_READ_CONF:
         if (check_len) {
-            if (ccw.count > dev->vdev->config_len) {
+            if (ccw.count > vdev->config_len) {
                 ret = -EINVAL;
                 break;
             }
         }
-        len = MIN(ccw.count, dev->vdev->config_len);
+        len = MIN(ccw.count, vdev->config_len);
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            virtio_bus_get_vdev_config(&dev->bus, dev->vdev->config);
+            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
             /* XXX config space endianness */
-            cpu_physical_memory_write(ccw.cda, dev->vdev->config, len);
+            cpu_physical_memory_write(ccw.cda, vdev->config, len);
             sch->curr_status.scsw.count = ccw.count - len;
             ret = 0;
         }
         break;
     case CCW_CMD_WRITE_CONF:
         if (check_len) {
-            if (ccw.count > dev->vdev->config_len) {
+            if (ccw.count > vdev->config_len) {
                 ret = -EINVAL;
                 break;
             }
        }
-        len = MIN(ccw.count, dev->vdev->config_len);
+        len = MIN(ccw.count, vdev->config_len);
        hw_len = len;
        if (!ccw.cda) {
            ret = -EFAULT;
@@ -371,9 +378,9 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         } else {
             len = hw_len;
             /* XXX config space endianness */
-            memcpy(dev->vdev->config, config, len);
+            memcpy(vdev->config, config, len);
             cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
-            virtio_bus_set_vdev_config(&dev->bus, dev->vdev->config);
+            virtio_bus_set_vdev_config(&dev->bus, vdev->config);
             sch->curr_status.scsw.count = ccw.count - len;
             ret = 0;
         }
@@ -397,9 +404,9 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
             virtio_ccw_stop_ioeventfd(dev);
         }
-        virtio_set_status(dev->vdev, status);
-        if (dev->vdev->status == 0) {
-            virtio_reset(dev->vdev);
+        virtio_set_status(vdev, status);
+        if (vdev->status == 0) {
+            virtio_reset(vdev);
         }
         if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
             virtio_ccw_start_ioeventfd(dev);
@@ -463,7 +470,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             vq_config.index = lduw_phys(ccw.cda);
-            vq_config.num_max = virtio_queue_get_num(dev->vdev,
+            vq_config.num_max = virtio_queue_get_num(vdev,
                                                      vq_config.index);
             stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
             sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
@@ -495,7 +502,6 @@ static int virtio_ccw_device_init(VirtioCcwDevice *dev, VirtIODevice *vdev)
     sch->driver_data = dev;
     dev->sch = sch;
 
-    dev->vdev = vdev;
     dev->indicators = 0;
 
     /* Initialize subchannel structure. */
@@ -608,7 +614,7 @@ static int virtio_ccw_device_init(VirtioCcwDevice *dev, VirtIODevice *vdev)
     memset(&sch->id, 0, sizeof(SenseId));
     sch->id.reserved = 0xff;
     sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
-    sch->id.cu_model = dev->vdev->device_id;
+    sch->id.cu_model = vdev->device_id;
 
     /* Only the first 32 feature bits are used. */
     dev->host_features[0] = virtio_bus_get_vdev_features(&dev->bus,
@@ -631,7 +637,6 @@ static int virtio_ccw_exit(VirtioCcwDevice *dev)
 {
     SubchDev *sch = dev->sch;
 
-    virtio_ccw_stop_ioeventfd(dev);
     if (sch) {
         css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
         g_free(sch);
@@ -892,9 +897,10 @@ static unsigned virtio_ccw_get_features(DeviceState *d)
 static void virtio_ccw_reset(DeviceState *d)
 {
     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
 
     virtio_ccw_stop_ioeventfd(dev);
-    virtio_reset(dev->vdev);
+    virtio_reset(vdev);
     css_reset_sch(dev->sch);
     dev->indicators = 0;
     dev->indicators2 = 0;
@@ -934,9 +940,10 @@ static int virtio_ccw_set_host_notifier(DeviceState *d, int n, bool assign)
 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                          bool assign, bool with_irqfd)
 {
-    VirtQueue *vq = virtio_get_queue(dev->vdev, n);
+    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+    VirtQueue *vq = virtio_get_queue(vdev, n);
     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(dev->vdev);
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
 
     if (assign) {
         int r = event_notifier_init(notifier, 0);
@@ -952,16 +959,16 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
          * land in qemu (and only the irq fd) in this code.
          */
         if (k->guest_notifier_mask) {
-            k->guest_notifier_mask(dev->vdev, n, false);
+            k->guest_notifier_mask(vdev, n, false);
         }
         /* get lost events and re-inject */
         if (k->guest_notifier_pending &&
-            k->guest_notifier_pending(dev->vdev, n)) {
+            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask) {
-            k->guest_notifier_mask(dev->vdev, n, true);
+            k->guest_notifier_mask(vdev, n, true);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
@@ -973,7 +980,7 @@ static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assigned)
 {
     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-    VirtIODevice *vdev = dev->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
     int r, n;
 
     for (n = 0; n < nvqs; n++) {
@@ -1228,6 +1235,8 @@ static int virtio_ccw_busdev_unplug(DeviceState *dev)
     VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
     SubchDev *sch = _dev->sch;
 
+    virtio_ccw_stop_ioeventfd(_dev);
+
     /*
      * We should arrive here only for device_del, since we don't support
     * direct hot(un)plug of channels, but only through virtio.

@@ -77,7 +77,6 @@ typedef struct VirtIOCCWDeviceClass {
 struct VirtioCcwDevice {
     DeviceState parent_obj;
     SubchDev *sch;
-    VirtIODevice *vdev;
     char *bus_id;
     uint32_t host_features[VIRTIO_CCW_FEATURE_SIZE];
     VirtioBusState bus;
@@ -1101,6 +1101,21 @@ static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd)
     return MFI_STAT_OK;
 }
 
+static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd)
+{
+    uint16_t flags;
+
+    /* mbox0 contains flags */
+    flags = le16_to_cpu(cmd->frame->dcmd.mbox[0]);
+    trace_megasas_dcmd_ld_list_query(cmd->index, flags);
+    if (flags == MR_LD_QUERY_TYPE_ALL ||
+        flags == MR_LD_QUERY_TYPE_EXPOSED_TO_HOST) {
+        return megasas_dcmd_ld_get_list(s, cmd);
+    }
+
+    return MFI_STAT_OK;
+}
+
 static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun,
                                       MegasasCmd *cmd)
 {
@@ -1404,6 +1419,8 @@ static const struct dcmd_cmd_tbl_t {
       megasas_dcmd_dummy },
     { MFI_DCMD_LD_GET_LIST, "LD_GET_LIST",
       megasas_dcmd_ld_get_list},
+    { MFI_DCMD_LD_LIST_QUERY, "LD_LIST_QUERY",
+      megasas_dcmd_ld_list_query },
     { MFI_DCMD_LD_GET_INFO, "LD_GET_INFO",
       megasas_dcmd_ld_get_info },
     { MFI_DCMD_LD_GET_PROP, "LD_GET_PROP",

@@ -164,6 +164,7 @@ typedef enum {
     MFI_DCMD_PD_BLINK = 0x02070100,
     MFI_DCMD_PD_UNBLINK = 0x02070200,
     MFI_DCMD_LD_GET_LIST = 0x03010000,
+    MFI_DCMD_LD_LIST_QUERY = 0x03010100,
     MFI_DCMD_LD_GET_INFO = 0x03020000,
     MFI_DCMD_LD_GET_PROP = 0x03030000,
     MFI_DCMD_LD_SET_PROP = 0x03040000,
@@ -411,6 +412,14 @@ typedef enum {
     MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, /*query for system drives */
 } mfi_pd_query_type;
 
+typedef enum {
+    MR_LD_QUERY_TYPE_ALL = 0,
+    MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+    MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
+    MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
+    MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
+} mfi_ld_query_type;
+
 /*
  * Other propertities and definitions
  */
@@ -469,6 +469,8 @@ static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
             r->req.dev->sense_is_ua = false;
         }
         break;
+    case TEST_UNIT_READY:
+        break;
     default:
         scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
         scsi_req_complete(req, CHECK_CONDITION);
@@ -886,7 +888,6 @@ static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
     case RELEASE:
     case ERASE:
     case ALLOW_MEDIUM_REMOVAL:
-    case VERIFY_10:
     case SEEK_10:
     case SYNCHRONIZE_CACHE:
     case SYNCHRONIZE_CACHE_16:
@@ -903,6 +904,16 @@ static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
     case ALLOW_OVERWRITE:
         cmd->xfer = 0;
         break;
+    case VERIFY_10:
+    case VERIFY_12:
+    case VERIFY_16:
+        if ((buf[1] & 2) == 0) {
+            cmd->xfer = 0;
+        } else if ((buf[1] & 4) != 0) {
+            cmd->xfer = 1;
+        }
+        cmd->xfer *= dev->blocksize;
+        break;
     case MODE_SENSE:
         break;
     case WRITE_SAME_10:
@@ -1100,6 +1111,9 @@ static void scsi_cmd_xfer_mode(SCSICommand *cmd)
     case WRITE_VERIFY_12:
     case WRITE_16:
     case WRITE_VERIFY_16:
+    case VERIFY_10:
+    case VERIFY_12:
+    case VERIFY_16:
     case COPY:
     case COPY_VERIFY:
     case COMPARE:
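The VERIFY transfer-length logic added to scsi_req_length() above decodes the BYTCHK field from byte 1 of the CDB: no data is transferred for a plain medium verify, and a single block is transferred when it is to be compared repeatedly. A small sketch of that decoding, assuming the usual SBC BYTCHK semantics:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the hunk: buf[1] bit 1 set means byte-compare data is
     * sent; bit 2 set means one block is repeated across the range. */
    static uint32_t verify_xfer_blocks(uint8_t cdb1, uint32_t blocks)
    {
        uint32_t xfer = blocks;
        if ((cdb1 & 2) == 0) {
            xfer = 0;        /* medium verify only, nothing transferred */
        } else if ((cdb1 & 4) != 0) {
            xfer = 1;        /* single block, compared repeatedly */
        }
        return xfer;         /* caller scales by the block size */
    }

    int main(void)
    {
        printf("%u %u %u\n",
               verify_xfer_blocks(0x00, 8),   /* 0 */
               verify_xfer_blocks(0x02, 8),   /* 8 */
               verify_xfer_blocks(0x06, 8));  /* 1 */
        return 0;
    }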
@ -1597,6 +1597,14 @@ static void scsi_disk_emulate_write_data(SCSIRequest *req)
|
|||
scsi_disk_emulate_unmap(r, r->iov.iov_base);
|
||||
break;
|
||||
|
||||
case VERIFY_10:
|
||||
case VERIFY_12:
|
||||
case VERIFY_16:
|
||||
if (r->req.status == -1) {
|
||||
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
abort();
|
||||
}
|
||||
|
@ -1837,6 +1845,14 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
|
|||
case UNMAP:
|
||||
DPRINTF("Unmap (len %lu)\n", (long)r->req.cmd.xfer);
|
||||
break;
|
||||
case VERIFY_10:
|
||||
case VERIFY_12:
|
||||
case VERIFY_16:
|
||||
DPRINTF("Verify (bytchk %lu)\n", (r->req.buf[1] >> 1) & 3);
|
||||
if (req->cmd.buf[1] & 6) {
|
||||
goto illegal_request;
|
||||
}
|
||||
break;
|
||||
case WRITE_SAME_10:
|
||||
case WRITE_SAME_16:
|
||||
nb_sectors = scsi_data_cdb_length(r->req.cmd.buf);
|
||||
|
@ -1936,10 +1952,6 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
|
|||
scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
|
||||
return 0;
|
||||
}
|
||||
/* fallthrough */
|
||||
case VERIFY_10:
|
||||
case VERIFY_12:
|
||||
case VERIFY_16:
|
||||
DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
|
||||
(command & 0xe) == 0xe ? "And Verify " : "",
|
||||
r->req.cmd.lba, len);
|
||||
|
@ -2169,6 +2181,7 @@ static const SCSIReqOps scsi_disk_emulate_reqops = {
|
|||
.send_command = scsi_disk_emulate_command,
|
||||
.read_data = scsi_disk_emulate_read_data,
|
||||
.write_data = scsi_disk_emulate_write_data,
|
||||
.cancel_io = scsi_cancel_io,
|
||||
.get_buf = scsi_get_buf,
|
||||
};
|
||||
|
||||
|
@@ -2207,14 +2220,14 @@ static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
     [UNMAP]                           = &scsi_disk_emulate_reqops,
     [WRITE_SAME_10]                   = &scsi_disk_emulate_reqops,
     [WRITE_SAME_16]                   = &scsi_disk_emulate_reqops,
+    [VERIFY_10]                       = &scsi_disk_emulate_reqops,
+    [VERIFY_12]                       = &scsi_disk_emulate_reqops,
+    [VERIFY_16]                       = &scsi_disk_emulate_reqops,

     [READ_6]                          = &scsi_disk_dma_reqops,
     [READ_10]                         = &scsi_disk_dma_reqops,
     [READ_12]                         = &scsi_disk_dma_reqops,
     [READ_16]                         = &scsi_disk_dma_reqops,
-    [VERIFY_10]                       = &scsi_disk_dma_reqops,
-    [VERIFY_12]                       = &scsi_disk_dma_reqops,
-    [VERIFY_16]                       = &scsi_disk_dma_reqops,
     [WRITE_6]                         = &scsi_disk_dma_reqops,
     [WRITE_10]                        = &scsi_disk_dma_reqops,
     [WRITE_12]                        = &scsi_disk_dma_reqops,
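For context, scsi-disk routes every opcode through a 256-entry table of SCSIReqOps pointers, so moving VERIFY from the DMA path to the emulated path is purely a table edit. A toy stand-in for that dispatch idea (illustrative types and values, not QEMU's):

```c
#include <stdio.h>

typedef struct ReqOps { const char *name; } ReqOps;

static const ReqOps emulate_ops = { "emulated" };
static const ReqOps dma_ops     = { "dma" };

enum { READ_10 = 0x28, VERIFY_10 = 0x2f };

/* One lookup per command; unlisted opcodes fall back to emulation. */
static const ReqOps *const dispatch[256] = {
    [READ_10]   = &dma_ops,
    [VERIFY_10] = &emulate_ops, /* after this commit, VERIFY is emulated */
};

int main(void)
{
    unsigned char op = VERIFY_10;
    const ReqOps *ops = dispatch[op] ? dispatch[op] : &emulate_ops;
    printf("opcode 0x%02x -> %s path\n", op, ops->name);
    return 0;
}
```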
@@ -2359,7 +2372,7 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
          * ones (such as WRITE SAME or EXTENDED COPY, etc.). So, without
          * O_DIRECT everything must go through SG_IO.
          */
-        if (bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE) {
+        if (!(bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE)) {
             break;
         }
@@ -37,8 +37,6 @@ do { fprintf(stderr, "scsi-generic: " fmt , ## __VA_ARGS__); } while (0)
 #include <scsi/sg.h>
 #include "block/scsi.h"

-#define SCSI_SENSE_BUF_SIZE 96
-
 #define SG_ERR_DRIVER_TIMEOUT 0x06
 #define SG_ERR_DRIVER_SENSE 0x08
@@ -60,7 +60,6 @@
 #define VSCSI_MAX_SECTORS       4096
 #define VSCSI_REQ_LIMIT         24

-#define SCSI_SENSE_BUF_SIZE     96
 #define SRP_RSP_SENSE_DATA_LEN  18

 typedef union vscsi_crq {
@@ -240,11 +240,10 @@ static int vhost_scsi_init(VirtIODevice *vdev)
     return 0;
 }

-static int vhost_scsi_exit(DeviceState *qdev)
+static void vhost_scsi_exit(VirtIODevice *vdev)
 {
-    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
-    VHostSCSI *s = VHOST_SCSI(qdev);
-    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(qdev);
+    VHostSCSI *s = VHOST_SCSI(vdev);
+    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

     migrate_del_blocker(s->migration_blocker);
     error_free(s->migration_blocker);

@@ -253,7 +252,7 @@ static void vhost_scsi_exit(VirtIODevice *vdev)
     vhost_scsi_set_status(vdev, 0);

     g_free(s->dev.vqs);
-    return virtio_scsi_common_exit(vs);
+    virtio_scsi_common_exit(vs);
 }

 static Property vhost_scsi_properties[] = {
@@ -265,10 +264,10 @@ static void vhost_scsi_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = vhost_scsi_exit;
     dc->props = vhost_scsi_properties;
     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
     vdc->init = vhost_scsi_init;
+    vdc->exit = vhost_scsi_exit;
     vdc->get_features = vhost_scsi_get_features;
     vdc->set_config = vhost_scsi_set_config;
     vdc->set_status = vhost_scsi_set_status;
@@ -147,6 +147,15 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
     qemu_get_be32s(f, &n);
     assert(n < vs->conf.num_queues);
     qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
+    /* TODO: add a way for SCSIBusInfo's load_request to fail,
+     * and fail migration instead of asserting here.
+     * When we do, we might be able to re-enable NDEBUG below.
+     */
+#ifdef NDEBUG
+#error building with NDEBUG is not supported
+#endif
+    assert(req->elem.in_num <= ARRAY_SIZE(req->elem.in_sg));
+    assert(req->elem.out_num <= ARRAY_SIZE(req->elem.out_sg));
     virtio_scsi_parse_req(s, vs->cmd_vqs[n], req);

     scsi_req_ref(sreq);
@@ -306,6 +315,10 @@ static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
     VirtIOSCSIReq *req = r->hba_private;
     uint32_t sense_len;

+    if (r->io_canceled) {
+        return;
+    }
+
     req->resp.cmd->response = VIRTIO_SCSI_S_OK;
     req->resp.cmd->status = status;
     if (req->resp.cmd->status == GOOD) {
@@ -483,7 +496,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
                                    uint32_t event, uint32_t reason)
 {
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
-    VirtIOSCSIReq *req = virtio_scsi_pop_req(s, vs->event_vq);
+    VirtIOSCSIReq *req;
     VirtIOSCSIEvent *evt;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
     int in_size;

@@ -492,6 +505,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
         return;
     }

+    req = virtio_scsi_pop_req(s, vs->event_vq);
     if (!req) {
         s->events_dropped = true;
         return;
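The reordering above moves the virtqueue pop below the early-exit check, so an event request is only taken from the queue once delivery can actually proceed. A minimal standalone sketch of that pattern (hypothetical names, not QEMU's types):

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct Elem { int id; } Elem;

static bool device_running = false;
static Elem pool = { 42 };

static Elem *pop_elem(void) { return &pool; } /* stand-in for pop_req */

static void push_event(void)
{
    if (!device_running) {
        return;             /* nothing acquired yet, nothing to lose */
    }
    Elem *e = pop_elem();   /* safe: all early exits are behind us */
    printf("event delivered via elem %d\n", e->id);
}

int main(void)
{
    push_event();           /* bails out without touching the queue */
    device_running = true;
    push_event();
    return 0;
}
```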
@@ -516,7 +530,7 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
     evt->event = event;
     evt->reason = reason;
     if (!dev) {
-        assert(event == VIRTIO_SCSI_T_NO_EVENT);
+        assert(event == VIRTIO_SCSI_T_EVENTS_MISSED);
     } else {
         evt->lun[0] = 1;
         evt->lun[1] = dev->id;
@@ -644,22 +658,21 @@ static int virtio_scsi_device_init(VirtIODevice *vdev)
     return 0;
 }

-int virtio_scsi_common_exit(VirtIOSCSICommon *vs)
+void virtio_scsi_common_exit(VirtIOSCSICommon *vs)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(vs);

     g_free(vs->cmd_vqs);
     virtio_cleanup(vdev);
-    return 0;
 }

-static int virtio_scsi_device_exit(DeviceState *qdev)
+static void virtio_scsi_device_exit(VirtIODevice *vdev)
 {
-    VirtIOSCSI *s = VIRTIO_SCSI(qdev);
-    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(qdev);
+    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
+    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

-    unregister_savevm(qdev, "virtio-scsi", s);
-    return virtio_scsi_common_exit(vs);
+    unregister_savevm(DEVICE(vdev), "virtio-scsi", s);
+    virtio_scsi_common_exit(vs);
 }

 static Property virtio_scsi_properties[] = {
@@ -680,10 +693,10 @@ static void virtio_scsi_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = virtio_scsi_device_exit;
     dc->props = virtio_scsi_properties;
     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
     vdc->init = virtio_scsi_device_init;
+    vdc->exit = virtio_scsi_device_exit;
     vdc->set_config = virtio_scsi_set_config;
     vdc->get_features = virtio_scsi_get_features;
     vdc->reset = virtio_scsi_reset;
@@ -625,7 +625,7 @@ struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta,
         exit(1);
     }

-    s->cdet = qemu_allocate_irqs(omap_mmc_cover_cb, s, 1)[0];
+    s->cdet = qemu_allocate_irq(omap_mmc_cover_cb, s, 0);
     sd_set_cb(s->card, NULL, s->cdet);

     return s;
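This and the following hunks switch callers from qemu_allocate_irqs(..., 1)[0], which discards (and so leaks) the one-element wrapper array, to the single-IRQ helper qemu_allocate_irq(). A self-contained imitation of the two shapes (stand-in types, not QEMU's real implementation):

```c
#include <stdio.h>
#include <stdlib.h>

typedef void (*irq_handler)(void *opaque, int n, int level);
typedef struct IRQState { irq_handler handler; void *opaque; int n; } *qemu_irq;

/* old style: returns a malloc'ed array; callers taking [0] lose the array */
static qemu_irq *allocate_irqs(irq_handler h, void *opaque, int count)
{
    qemu_irq *a = calloc(count, sizeof(*a));
    for (int i = 0; i < count; i++) {
        a[i] = malloc(sizeof(**a));
        *a[i] = (struct IRQState){ h, opaque, i };
    }
    return a;
}

/* new style: one IRQ, no wrapper array to keep track of */
static qemu_irq allocate_irq(irq_handler h, void *opaque, int n)
{
    qemu_irq irq = malloc(sizeof(*irq));
    *irq = (struct IRQState){ h, opaque, n };
    return irq;
}

static void cover_cb(void *opaque, int n, int level)
{
    printf("irq %d -> level %d\n", n, level);
}

int main(void)
{
    qemu_irq cdet = allocate_irq(cover_cb, NULL, 0);
    cdet->handler(cdet->opaque, cdet->n, 1);
    free(cdet);
    qemu_irq *arr = allocate_irqs(cover_cb, NULL, 1); /* old pattern */
    free(arr[0]);
    free(arr);                                        /* extra bookkeeping */
    return 0;
}
```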
@@ -1169,8 +1169,8 @@ static void sdhci_initfn(Object *obj)
     if (s->card == NULL) {
         exit(1);
     }
-    s->eject_cb = qemu_allocate_irqs(sdhci_insert_eject_cb, s, 1)[0];
-    s->ro_cb = qemu_allocate_irqs(sdhci_card_readonly_cb, s, 1)[0];
+    s->eject_cb = qemu_allocate_irq(sdhci_insert_eject_cb, s, 0);
+    s->ro_cb = qemu_allocate_irq(sdhci_card_readonly_cb, s, 0);
     sd_set_cb(s->card, s->ro_cb, s->eject_cb);

     s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);

@@ -1185,8 +1185,8 @@ static void sdhci_uninitfn(Object *obj)
     timer_free(s->insert_timer);
     timer_del(s->transfer_timer);
     timer_free(s->transfer_timer);
-    qemu_free_irqs(&s->eject_cb);
-    qemu_free_irqs(&s->ro_cb);
+    qemu_free_irq(s->eject_cb);
+    qemu_free_irq(s->ro_cb);

     if (s->fifo_buffer) {
         g_free(s->fifo_buffer);
@@ -230,8 +230,17 @@ static int ssi_sd_load(QEMUFile *f, void *opaque, int version_id)
     for (i = 0; i < 5; i++)
         s->response[i] = qemu_get_be32(f);
     s->arglen = qemu_get_be32(f);
+    if (s->mode == SSI_SD_CMDARG &&
+        (s->arglen < 0 || s->arglen >= ARRAY_SIZE(s->cmdarg))) {
+        return -EINVAL;
+    }
     s->response_pos = qemu_get_be32(f);
     s->stopping = qemu_get_be32(f);
+    if (s->mode == SSI_SD_RESPONSE &&
+        (s->response_pos < 0 || s->response_pos >= ARRAY_SIZE(s->response) ||
+         (!s->stopping && s->arglen > ARRAY_SIZE(s->response)))) {
+        return -EINVAL;
+    }

     ss->cs = qemu_get_be32(f);
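The checks added above treat the migration stream as untrusted input: every index read off the wire is range-checked before it can steer a later array access. A standalone sketch of the same pattern (illustrative names and sizes):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

struct dev { int32_t arglen; uint8_t cmdarg[16]; };

static int dev_load(struct dev *s, int32_t wire_arglen)
{
    s->arglen = wire_arglen;
    if (s->arglen < 0 || s->arglen >= (int32_t)ARRAY_SIZE(s->cmdarg)) {
        return -EINVAL;   /* reject, don't clamp: the stream is corrupt */
    }
    return 0;
}

int main(void)
{
    struct dev d;
    printf("%d %d\n", dev_load(&d, 5), dev_load(&d, 999));
    return 0;
}
```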
@@ -838,6 +838,5 @@ SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem)
 qemu_irq sh7750_irl(SH7750State *s)
 {
     sh_intc_toggle_source(sh_intc_source(&s->intc, IRL), 1, 0); /* enable */
-    return qemu_allocate_irqs(sh_intc_set_irl, sh_intc_source(&s->intc, IRL),
-                              1)[0];
+    return qemu_allocate_irq(sh_intc_set_irl, sh_intc_source(&s->intc, IRL), 0);
 }
@@ -240,11 +240,25 @@ static const MemoryRegionOps pl022_ops = {
     .endianness = DEVICE_NATIVE_ENDIAN,
 };

+static int pl022_post_load(void *opaque, int version_id)
+{
+    PL022State *s = opaque;
+
+    if (s->tx_fifo_head < 0 ||
+        s->tx_fifo_head >= ARRAY_SIZE(s->tx_fifo) ||
+        s->rx_fifo_head < 0 ||
+        s->rx_fifo_head >= ARRAY_SIZE(s->rx_fifo)) {
+        return -1;
+    }
+    return 0;
+}
+
 static const VMStateDescription vmstate_pl022 = {
     .name = "pl022_ssp",
     .version_id = 1,
     .minimum_version_id = 1,
     .minimum_version_id_old = 1,
+    .post_load = pl022_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(cr0, PL022State),
         VMSTATE_UINT32(cr1, PL022State),
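The hook wired up above uses the vmstate post_load mechanism: a callback that runs after all fields are loaded and can veto the whole migration by returning nonzero. A minimal stand-in showing the shape of that contract (illustrative types; QEMU's real machinery lives in the vmstate code):

```c
#include <stdio.h>

typedef struct VMStateDescription {
    const char *name;
    int (*post_load)(void *opaque, int version_id);
} VMStateDescription;

typedef struct PL022State { int tx_fifo_head; int tx_fifo[8]; } PL022State;

static int pl022_post_load(void *opaque, int version_id)
{
    PL022State *s = opaque;
    return (s->tx_fifo_head < 0 || s->tx_fifo_head >= 8) ? -1 : 0;
}

static const VMStateDescription vmstate_pl022 = {
    .name = "pl022_ssp",
    .post_load = pl022_post_load,
};

/* toy loader: fields first, then the validation hook */
static int vmstate_load(const VMStateDescription *d, void *opaque)
{
    /* ...field loading elided... */
    return d->post_load ? d->post_load(opaque, 1) : 0;
}

int main(void)
{
    PL022State s = { .tx_fifo_head = 99 };
    printf("load result: %d\n", vmstate_load(&vmstate_pl022, &s));
    return 0;
}
```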
@@ -320,6 +320,7 @@ static uint64_t icp_pit_read(void *opaque, hwaddr offset,
     n = offset >> 8;
     if (n > 2) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad timer %d\n", __func__, n);
         return 0;
     }

     return arm_timer_read(s->timer[n], offset & 0xff);

@@ -334,6 +335,7 @@ static void icp_pit_write(void *opaque, hwaddr offset,
     n = offset >> 8;
     if (n > 2) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad timer %d\n", __func__, n);
         return;
     }

     arm_timer_write(s->timer[n], offset & 0xff, value);
@@ -42,7 +42,6 @@

 #define HPET_MSI_SUPPORT        0

-#define TYPE_HPET "hpet"
 #define HPET(obj) OBJECT_CHECK(HPETState, (obj), TYPE_HPET)

 struct HPETState;
@@ -228,6 +227,18 @@ static int hpet_pre_load(void *opaque)
     return 0;
 }

+static bool hpet_validate_num_timers(void *opaque, int version_id)
+{
+    HPETState *s = opaque;
+
+    if (s->num_timers < HPET_MIN_TIMERS) {
+        return false;
+    } else if (s->num_timers > HPET_MAX_TIMERS) {
+        return false;
+    }
+    return true;
+}
+
 static int hpet_post_load(void *opaque, int version_id)
 {
     HPETState *s = opaque;
@@ -296,6 +307,7 @@ static const VMStateDescription vmstate_hpet = {
         VMSTATE_UINT64(isr, HPETState),
         VMSTATE_UINT64(hpet_counter, HPETState),
         VMSTATE_UINT8_V(num_timers, HPETState, 2),
+        VMSTATE_VALIDATE("num_timers in range", hpet_validate_num_timers),
         VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0,
                                     vmstate_hpet_timer, HPETTimer),
         VMSTATE_END_OF_LIST()
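Unlike a post_load hook, the VMSTATE_VALIDATE entry above runs mid-stream: it sits between num_timers and the variable-length timer array that num_timers sizes, so the array loader never sees an out-of-range count. A sketch of what such a validating pseudo-field does (stand-in types; the real macros are QEMU's, and the 3/32 limits below stand in for HPET_MIN/MAX_TIMERS):

```c
#include <stdbool.h>
#include <stdio.h>

typedef bool (*validate_fn)(void *opaque, int version_id);

struct field { const char *name; validate_fn validate; };

static bool validate_num_timers(void *opaque, int version_id)
{
    unsigned char n = *(unsigned char *)opaque;
    return n >= 3 && n <= 32;   /* assumed HPET_MIN/MAX_TIMERS values */
}

static int load_fields(const struct field *f, void *opaque)
{
    for (; f->name; f++) {
        if (f->validate && !f->validate(opaque, 2)) {
            fprintf(stderr, "%s: failed validation\n", f->name);
            return -1;          /* abort before the dependent array loads */
        }
    }
    return 0;
}

int main(void)
{
    unsigned char num_timers = 200; /* hostile stream value */
    const struct field fields[] = {
        { "num_timers in range", validate_num_timers },
        { 0 }
    };
    return load_fields(fields, &num_timers) ? 1 : 0;
}
```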
@@ -757,11 +769,6 @@ static void hpet_device_class_init(ObjectClass *klass, void *data)
     dc->props = hpet_device_properties;
 }

-bool hpet_find(void)
-{
-    return object_resolve_path_type("", TYPE_HPET, NULL);
-}
-
 static const TypeInfo hpet_device_info = {
     .name = TYPE_HPET,
     .parent = TYPE_SYS_BUS_DEVICE,
@@ -227,7 +227,7 @@ static void omap_gp_timer_clk_update(void *opaque, int line, int on)
 static void omap_gp_timer_clk_setup(struct omap_gp_timer_s *timer)
 {
     omap_clk_adduser(timer->clk,
-                     qemu_allocate_irqs(omap_gp_timer_clk_update, timer, 1)[0]);
+                     qemu_allocate_irq(omap_gp_timer_clk_update, timer, 0));
     timer->rate = omap_clk_getrate(timer->clk);
 }

@@ -476,7 +476,7 @@ struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta,
     s->clk = fclk;
     s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_tick, s);
     s->match = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_match, s);
-    s->in = qemu_allocate_irqs(omap_gp_timer_input, s, 1)[0];
+    s->in = qemu_allocate_irq(omap_gp_timer_input, s, 0);
     omap_gp_timer_reset(s);
     omap_gp_timer_clk_setup(s);
@@ -47,7 +47,9 @@ static int usb_device_post_load(void *opaque, int version_id)
     } else {
         dev->attached = 1;
     }
-    if (dev->setup_index >= sizeof(dev->data_buf) ||
+    if (dev->setup_index < 0 ||
+        dev->setup_len < 0 ||
+        dev->setup_index >= sizeof(dev->data_buf) ||
         dev->setup_len >= sizeof(dev->data_buf)) {
         return -EINVAL;
     }
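The explicit `< 0` checks added above guard signed fields loaded from the stream. Whether a negative value slips past a plain `>=` bound test depends entirely on the implicit conversions in play; the program below shows the dangerous variant of that bug class, where the bound itself lands in a signed variable, and how spelling out the negative case removes any dependence on conversion rules (illustrative only; QEMU's exact types differ):

```c
#include <stdio.h>

int main(void)
{
    char data_buf[4096];
    int setup_index = -1;              /* from an untrusted stream */
    int limit = (int)sizeof(data_buf); /* a signed bound: -1 sails past */

    /* prints 0: the signed comparison does NOT reject -1 */
    printf("plain bound check rejects: %d\n", setup_index >= limit);

    /* prints 1: the explicit negative check always rejects it */
    printf("explicit check rejects:    %d\n",
           setup_index < 0 || setup_index >= limit);
    return 0;
}
```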
@@ -19,6 +19,7 @@
  */

 #include "qemu-common.h"
+#include "qemu/error-report.h"
 #include "hw/usb.h"
 #include "hw/usb/desc.h"
 #include "sysemu/bt.h"

@@ -506,6 +507,14 @@ static int usb_bt_initfn(USBDevice *dev)

     usb_desc_create_serial(dev);
     usb_desc_init(dev);
+    s->dev.opaque = s;
+    if (!s->hci) {
+        s->hci = bt_new_hci(qemu_find_bt_vlan(0));
+    }
+    s->hci->opaque = s;
+    s->hci->evt_recv = usb_bt_out_hci_packet_event;
+    s->hci->acl_recv = usb_bt_out_hci_packet_acl;
+    usb_bt_handle_reset(&s->dev);
     s->intr = usb_ep_get(dev, USB_TOKEN_IN, USB_EVT_EP);

     return 0;

@@ -516,6 +525,7 @@ static USBDevice *usb_bt_init(USBBus *bus, const char *cmdline)
     USBDevice *dev;
     struct USBBtState *s;
     HCIInfo *hci;
+    const char *name = "usb-bt-dongle";

     if (*cmdline) {
         hci = hci_init(cmdline);

@@ -525,19 +535,17 @@ static USBDevice *usb_bt_init(USBBus *bus, const char *cmdline)

     if (!hci)
         return NULL;
-    dev = usb_create_simple(bus, "usb-bt-dongle");
+    dev = usb_create(bus, name);
+    if (!dev) {
+        error_report("Failed to create USB device '%s'", name);
+        return NULL;
+    }
     s = DO_UPCAST(struct USBBtState, dev, dev);
-    s->dev.opaque = s;

     s->hci = hci;
-    s->hci->opaque = s;
-    s->hci->evt_recv = usb_bt_out_hci_packet_event;
-    s->hci->acl_recv = usb_bt_out_hci_packet_acl;
-
-    usb_bt_handle_reset(&s->dev);
+    if (qdev_init(&dev->qdev) < 0) {
+        error_report("Failed to initialize USB device '%s'", name);
+        return NULL;
+    }

     return dev;
 }
@@ -309,7 +309,9 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                       uint64_t size)
 {
     int i;
-    for (i = 0; i < dev->nvqs; ++i) {
+    int r = 0;
+
+    for (i = 0; !r && i < dev->nvqs; ++i) {
         struct vhost_virtqueue *vq = dev->vqs + i;
         hwaddr l;
         void *p;

@@ -321,15 +323,15 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
         p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
         if (!p || l != vq->ring_size) {
             fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
-            return -ENOMEM;
+            r = -ENOMEM;
         }
         if (p != vq->ring) {
             fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
-            return -EBUSY;
+            r = -EBUSY;
         }
         cpu_physical_memory_unmap(p, l, 0, 0);
     }
-    return 0;
+    return r;
 }

 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
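The control-flow change above replaces mid-loop returns, which skipped the unmap call, with an error accumulator: the first failure is recorded, the current iteration still releases its mapping, and the loop condition stops further work. A standalone sketch of that shape (stand-in resource functions):

```c
#include <errno.h>
#include <stdio.h>

static void *map_ring(int i)    { return i == 1 ? NULL : (void *)&map_ring; }
static void unmap_ring(void *p) { printf("unmap %p\n", p); }

static int verify_rings(int nvqs)
{
    int r = 0;
    for (int i = 0; !r && i < nvqs; ++i) {
        void *p = map_ring(i);
        if (!p) {
            fprintf(stderr, "ring %d failed\n", i);
            r = -ENOMEM;       /* remember the error... */
        }
        unmap_ring(p);         /* ...but still release the mapping */
    }
    return r;
}

int main(void) { return verify_rings(3) ? 1 : 0; }
```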
@@ -370,16 +370,14 @@ static int virtio_balloon_device_init(VirtIODevice *vdev)
     return 0;
 }

-static int virtio_balloon_device_exit(DeviceState *qdev)
+static void virtio_balloon_device_exit(VirtIODevice *vdev)
 {
-    VirtIOBalloon *s = VIRTIO_BALLOON(qdev);
-    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);

     balloon_stats_destroy_timer(s);
     qemu_remove_balloon_handler(s);
-    unregister_savevm(qdev, "virtio-balloon", s);
+    unregister_savevm(DEVICE(vdev), "virtio-balloon", s);
     virtio_cleanup(vdev);
-    return 0;
 }

 static Property virtio_balloon_properties[] = {

@@ -390,10 +388,10 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = virtio_balloon_device_exit;
     dc->props = virtio_balloon_properties;
     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
     vdc->init = virtio_balloon_device_init;
+    vdc->exit = virtio_balloon_device_exit;
     vdc->get_config = virtio_balloon_get_config;
     vdc->set_config = virtio_balloon_set_config;
     vdc->get_features = virtio_balloon_get_features;
@@ -37,8 +37,8 @@ do { printf("virtio_bus: " fmt , ## __VA_ARGS__); } while (0)
 #define DPRINTF(fmt, ...) do { } while (0)
 #endif

-/* Plug the VirtIODevice */
-int virtio_bus_plug_device(VirtIODevice *vdev)
+/* A VirtIODevice is being plugged */
+int virtio_bus_device_plugged(VirtIODevice *vdev)
 {
     DeviceState *qdev = DEVICE(vdev);
     BusState *qbus = BUS(qdev_get_parent_bus(qdev));

@@ -46,8 +46,6 @@ int virtio_bus_device_plugged(VirtIODevice *vdev)
     VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
     DPRINTF("%s: plug device.\n", qbus->name);

-    bus->vdev = vdev;
-
     if (klass->device_plugged != NULL) {
         klass->device_plugged(qbus->parent);
     }

@@ -58,73 +56,83 @@ int virtio_bus_device_plugged(VirtIODevice *vdev)
 /* Reset the virtio_bus */
 void virtio_bus_reset(VirtioBusState *bus)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
+
     DPRINTF("%s: reset device.\n", qbus->name);
-    if (bus->vdev != NULL) {
-        virtio_reset(bus->vdev);
+    if (vdev != NULL) {
+        virtio_reset(vdev);
     }
 }

-/* Destroy the VirtIODevice */
-void virtio_bus_destroy_device(VirtioBusState *bus)
+/* A VirtIODevice is being unplugged */
+void virtio_bus_device_unplugged(VirtIODevice *vdev)
 {
-    BusState *qbus = BUS(bus);
-    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
+    DeviceState *qdev = DEVICE(vdev);
+    BusState *qbus = BUS(qdev_get_parent_bus(qdev));
+    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(qbus);

     DPRINTF("%s: remove device.\n", qbus->name);

-    if (bus->vdev != NULL) {
-        if (klass->device_unplug != NULL) {
-            klass->device_unplug(qbus->parent);
+    if (vdev != NULL) {
+        if (klass->device_unplugged != NULL) {
+            klass->device_unplugged(qbus->parent);
         }
-        object_unparent(OBJECT(bus->vdev));
-        bus->vdev = NULL;
     }
 }

 /* Get the device id of the plugged device. */
 uint16_t virtio_bus_get_vdev_id(VirtioBusState *bus)
 {
-    assert(bus->vdev != NULL);
-    return bus->vdev->device_id;
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
+    assert(vdev != NULL);
+    return vdev->device_id;
 }

 /* Get the config_len field of the plugged device. */
 size_t virtio_bus_get_vdev_config_len(VirtioBusState *bus)
 {
-    assert(bus->vdev != NULL);
-    return bus->vdev->config_len;
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
+    assert(vdev != NULL);
+    return vdev->config_len;
 }

 /* Get the features of the plugged device. */
 uint32_t virtio_bus_get_vdev_features(VirtioBusState *bus,
                                       uint32_t requested_features)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
     VirtioDeviceClass *k;
-    assert(bus->vdev != NULL);
-    k = VIRTIO_DEVICE_GET_CLASS(bus->vdev);
+
+    assert(vdev != NULL);
+    k = VIRTIO_DEVICE_GET_CLASS(vdev);
     assert(k->get_features != NULL);
-    return k->get_features(bus->vdev, requested_features);
+    return k->get_features(vdev, requested_features);
 }

 /* Set the features of the plugged device. */
 void virtio_bus_set_vdev_features(VirtioBusState *bus,
                                   uint32_t requested_features)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
     VirtioDeviceClass *k;
-    assert(bus->vdev != NULL);
-    k = VIRTIO_DEVICE_GET_CLASS(bus->vdev);
+
+    assert(vdev != NULL);
+    k = VIRTIO_DEVICE_GET_CLASS(vdev);
     if (k->set_features != NULL) {
-        k->set_features(bus->vdev, requested_features);
+        k->set_features(vdev, requested_features);
     }
 }

 /* Get bad features of the plugged device. */
 uint32_t virtio_bus_get_vdev_bad_features(VirtioBusState *bus)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
     VirtioDeviceClass *k;
-    assert(bus->vdev != NULL);
-    k = VIRTIO_DEVICE_GET_CLASS(bus->vdev);
+
+    assert(vdev != NULL);
+    k = VIRTIO_DEVICE_GET_CLASS(vdev);
     if (k->bad_features != NULL) {
-        return k->bad_features(bus->vdev);
+        return k->bad_features(vdev);
     } else {
         return 0;
     }

@@ -133,22 +141,26 @@ uint32_t virtio_bus_get_vdev_bad_features(VirtioBusState *bus)
 /* Get config of the plugged device. */
 void virtio_bus_get_vdev_config(VirtioBusState *bus, uint8_t *config)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
     VirtioDeviceClass *k;
-    assert(bus->vdev != NULL);
-    k = VIRTIO_DEVICE_GET_CLASS(bus->vdev);
+
+    assert(vdev != NULL);
+    k = VIRTIO_DEVICE_GET_CLASS(vdev);
     if (k->get_config != NULL) {
-        k->get_config(bus->vdev, config);
+        k->get_config(vdev, config);
     }
 }

 /* Set config of the plugged device. */
 void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(bus);
     VirtioDeviceClass *k;
-    assert(bus->vdev != NULL);
-    k = VIRTIO_DEVICE_GET_CLASS(bus->vdev);
+
+    assert(vdev != NULL);
+    k = VIRTIO_DEVICE_GET_CLASS(vdev);
     if (k->set_config != NULL) {
-        k->set_config(bus->vdev, config);
+        k->set_config(vdev, config);
     }
 }
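The theme of this and the following files is the removal of the cached bus->vdev pointer in favor of the virtio_bus_get_device() lookup, so the device is derived from the bus topology on every access and cannot go stale across unplug. A minimal stand-in for the idea (illustrative types; QEMU's real helper takes the first child on the bus's child list):

```c
#include <stddef.h>
#include <stdio.h>

typedef struct VirtIODevice { int device_id; } VirtIODevice;
typedef struct VirtioBusState {
    VirtIODevice *child;   /* stand-in for the qbus child list */
} VirtioBusState;

/* Look the device up each time instead of trusting a cached pointer. */
static VirtIODevice *virtio_bus_get_device(VirtioBusState *bus)
{
    return bus->child;
}

int main(void)
{
    VirtIODevice dev = { 4 };
    VirtioBusState bus = { &dev };
    VirtIODevice *vdev = virtio_bus_get_device(&bus);
    printf("device id %d\n", vdev ? vdev->device_id : -1);

    bus.child = NULL;      /* after unplug the lookup yields NULL... */
    printf("after unplug: %p\n", (void *)virtio_bus_get_device(&bus));
    return 0;              /* ...rather than a dangling cached pointer */
}
```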
@@ -95,7 +95,7 @@ static void virtio_mmio_bus_new(VirtioBusState *bus, size_t bus_size,
 static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
 {
     VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
-    VirtIODevice *vdev = proxy->bus.vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

     DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

@@ -185,7 +185,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                               unsigned size)
 {
     VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
-    VirtIODevice *vdev = proxy->bus.vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

     DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
             (int)offset, value);

@@ -298,12 +298,13 @@ static const MemoryRegionOps virtio_mem_ops = {
 static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
 {
     VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     int level;

-    if (!proxy->bus.vdev) {
+    if (!vdev) {
         return;
     }
-    level = (proxy->bus.vdev->isr != 0);
+    level = (vdev->isr != 0);
     DPRINTF("virtio_mmio setting IRQ %d\n", level);
     qemu_set_irq(proxy->irq, level);
 }
@@ -113,31 +113,40 @@ static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

     if (msix_enabled(&proxy->pci_dev))
         msix_notify(&proxy->pci_dev, vector);
-    else
-        pci_set_irq(&proxy->pci_dev, proxy->vdev->isr & 1);
+    else {
+        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+        pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
+    }
 }

 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
     pci_device_save(&proxy->pci_dev, f);
     msix_save(&proxy->pci_dev, f);
     if (msix_present(&proxy->pci_dev))
-        qemu_put_be16(f, proxy->vdev->config_vector);
+        qemu_put_be16(f, vdev->config_vector);
 }

 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
     if (msix_present(&proxy->pci_dev))
-        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
+        qemu_put_be16(f, virtio_queue_vector(vdev, n));
 }

 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
     int ret;
     ret = pci_device_load(&proxy->pci_dev, f);
     if (ret) {
@@ -146,12 +155,12 @@ static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
     msix_unuse_all_vectors(&proxy->pci_dev);
     msix_load(&proxy->pci_dev, f);
     if (msix_present(&proxy->pci_dev)) {
-        qemu_get_be16s(f, &proxy->vdev->config_vector);
+        qemu_get_be16s(f, &vdev->config_vector);
     } else {
-        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
+        vdev->config_vector = VIRTIO_NO_VECTOR;
     }
-    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
-        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
+    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
+        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
     }
     return 0;
 }
@@ -159,13 +168,15 @@ static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
     uint16_t vector;
     if (msix_present(&proxy->pci_dev)) {
         qemu_get_be16s(f, &vector);
     } else {
         vector = VIRTIO_NO_VECTOR;
     }
-    virtio_queue_set_vector(proxy->vdev, n, vector);
+    virtio_queue_set_vector(vdev, n, vector);
     if (vector != VIRTIO_NO_VECTOR) {
         return msix_vector_use(&proxy->pci_dev, vector);
     }
@@ -175,7 +186,8 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                  int n, bool assign, bool set_handler)
 {
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtQueue *vq = virtio_get_queue(vdev, n);
     EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
     int r = 0;

@@ -200,6 +212,7 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,

 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     int n, r;

     if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||

@@ -209,7 +222,7 @@ static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
     }

     for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(proxy->vdev, n)) {
+        if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }

@@ -223,7 +236,7 @@ static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)

 assign_error:
     while (--n >= 0) {
-        if (!virtio_queue_get_num(proxy->vdev, n)) {
+        if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }

@@ -236,6 +249,7 @@ assign_error:

 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
 {
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     int r;
     int n;

@@ -244,7 +258,7 @@ static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
     }

     for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
-        if (!virtio_queue_get_num(proxy->vdev, n)) {
+        if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
@@ -257,7 +271,7 @@ static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
 {
     VirtIOPCIProxy *proxy = opaque;
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     hwaddr pa;

     switch (addr) {

@@ -272,7 +286,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
         pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
         if (pa == 0) {
             virtio_pci_stop_ioeventfd(proxy);
-            virtio_reset(proxy->vdev);
+            virtio_reset(vdev);
             msix_unuse_all_vectors(&proxy->pci_dev);
         }
         else

@@ -299,7 +313,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
         }

         if (vdev->status == 0) {
-            virtio_reset(proxy->vdev);
+            virtio_reset(vdev);
             msix_unuse_all_vectors(&proxy->pci_dev);
         }

@@ -335,7 +349,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)

 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
 {
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     uint32_t ret = 0xFFFFFFFF;

     switch (addr) {
@@ -381,6 +395,7 @@ static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                        unsigned size)
 {
     VirtIOPCIProxy *proxy = opaque;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
     uint64_t val = 0;
     if (addr < config) {

@@ -390,16 +405,16 @@ static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,

     switch (size) {
     case 1:
-        val = virtio_config_readb(proxy->vdev, addr);
+        val = virtio_config_readb(vdev, addr);
         break;
     case 2:
-        val = virtio_config_readw(proxy->vdev, addr);
+        val = virtio_config_readw(vdev, addr);
         if (virtio_is_big_endian()) {
             val = bswap16(val);
         }
         break;
     case 4:
-        val = virtio_config_readl(proxy->vdev, addr);
+        val = virtio_config_readl(vdev, addr);
         if (virtio_is_big_endian()) {
             val = bswap32(val);
         }

@@ -413,6 +428,7 @@ static void virtio_pci_config_write(void *opaque, hwaddr addr,
 {
     VirtIOPCIProxy *proxy = opaque;
     uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     if (addr < config) {
         virtio_ioport_write(proxy, addr, val);
         return;

@@ -424,19 +440,19 @@ static void virtio_pci_config_write(void *opaque, hwaddr addr,
      */
     switch (size) {
     case 1:
-        virtio_config_writeb(proxy->vdev, addr, val);
+        virtio_config_writeb(vdev, addr, val);
         break;
     case 2:
         if (virtio_is_big_endian()) {
             val = bswap16(val);
         }
-        virtio_config_writew(proxy->vdev, addr, val);
+        virtio_config_writew(vdev, addr, val);
         break;
     case 4:
         if (virtio_is_big_endian()) {
             val = bswap32(val);
         }
-        virtio_config_writel(proxy->vdev, addr, val);
+        virtio_config_writel(vdev, addr, val);
         break;
     }
 }
@@ -455,6 +471,7 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                 uint32_t val, int len)
 {
     VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

     pci_default_write_config(pci_dev, address, val, len);

@@ -462,8 +479,7 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
         !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
         !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
         virtio_pci_stop_ioeventfd(proxy);
-        virtio_set_status(proxy->vdev,
-                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
+        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
     }
 }
@@ -506,7 +522,8 @@ static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                     unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
     EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     int ret;
     ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, NULL, irqfd->virq);

@@ -517,7 +534,8 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                          unsigned int queue_no,
                                          unsigned int vector)
 {
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
     EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
     int ret;

@@ -529,7 +547,7 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
 {
     PCIDevice *dev = &proxy->pci_dev;
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     unsigned int vector;
     int ret, queue_no;

@@ -578,7 +596,7 @@ undo:
 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
 {
     PCIDevice *dev = &proxy->pci_dev;
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     unsigned int vector;
     int queue_no;
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -606,8 +624,9 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int vector,
                                        MSIMessage msg)
 {
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(proxy->vdev);
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
     EventNotifier *n = virtio_queue_get_guest_notifier(vq);
     VirtIOIRQFD *irqfd;
     int ret = 0;

@@ -626,10 +645,10 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
      * Otherwise, set it up now.
      */
     if (k->guest_notifier_mask) {
-        k->guest_notifier_mask(proxy->vdev, queue_no, false);
+        k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
-            k->guest_notifier_pending(proxy->vdev, queue_no)) {
+            k->guest_notifier_pending(vdev, queue_no)) {
             event_notifier_set(n);
         }
     } else {

@@ -642,13 +661,14 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector)
 {
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(proxy->vdev);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */
     if (k->guest_notifier_mask) {
-        k->guest_notifier_mask(proxy->vdev, queue_no, true);
+        k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
     }
@@ -658,7 +678,7 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                     MSIMessage msg)
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     int ret, queue_no;

     for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {

@@ -688,7 +708,7 @@ undo:
 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     int queue_no;

     for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {

@@ -707,7 +727,7 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
                                    unsigned int vector_end)
 {
     VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     int queue_no;
     unsigned int vector;
@@ -739,8 +759,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(proxy->vdev);
-    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+    VirtQueue *vq = virtio_get_queue(vdev, n);
     EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

     if (assign) {

@@ -755,7 +776,7 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
     }

     if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
-        vdc->guest_notifier_mask(proxy->vdev, n, !assign);
+        vdc->guest_notifier_mask(vdev, n, !assign);
     }

     return 0;
@@ -770,7 +791,7 @@ static bool virtio_pci_query_guest_notifiers(DeviceState *d)
 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-    VirtIODevice *vdev = proxy->vdev;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     int r, n;
     bool with_irqfd = msix_enabled(&proxy->pci_dev) &&

@@ -864,11 +885,12 @@ static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
 {
     VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

     if (running) {
         /* Try to find out if the guest has bus master disabled, but is
            in ready state. Then we have a buggy guest OS. */
-        if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+        if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
             !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
             proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
         }
@@ -943,8 +965,6 @@ static void virtio_pci_device_plugged(DeviceState *d)
     uint8_t *config;
     uint32_t size;

-    proxy->vdev = bus->vdev;
-
     config = proxy->pci_dev.config;
     if (proxy->class_code) {
         pci_config_set_class(config, proxy->class_code);

@@ -982,6 +1002,15 @@ static void virtio_pci_device_plugged(DeviceState *d)
                                                 proxy->host_features);
 }

+static void virtio_pci_device_unplugged(DeviceState *d)
+{
+    PCIDevice *pci_dev = PCI_DEVICE(d);
+    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+
+    virtio_pci_stop_ioeventfd(proxy);
+    msix_uninit_exclusive_bar(pci_dev);
+}
+
 static int virtio_pci_init(PCIDevice *pci_dev)
 {
     VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);

@@ -996,9 +1025,7 @@ static int virtio_pci_init(PCIDevice *pci_dev)
 static void virtio_pci_exit(PCIDevice *pci_dev)
 {
     VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
-    virtio_pci_stop_ioeventfd(proxy);
     memory_region_destroy(&proxy->bar);
-    msix_uninit_exclusive_bar(pci_dev);
 }

 static void virtio_pci_reset(DeviceState *qdev)

@@ -1533,6 +1560,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
     k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
     k->vmstate_change = virtio_pci_vmstate_change;
     k->device_plugged = virtio_pci_device_plugged;
+    k->device_unplugged = virtio_pci_device_unplugged;
 }

 static const TypeInfo virtio_pci_bus_info = {
@@ -82,7 +82,6 @@ typedef struct VirtioPCIClass {

 struct VirtIOPCIProxy {
     PCIDevice pci_dev;
-    VirtIODevice *vdev;
     MemoryRegion bar;
     uint32_t flags;
     uint32_t class_code;
@@ -190,16 +190,14 @@ static int virtio_rng_device_init(VirtIODevice *vdev)
     return 0;
 }

-static int virtio_rng_device_exit(DeviceState *qdev)
+static void virtio_rng_device_exit(VirtIODevice *vdev)
 {
-    VirtIORNG *vrng = VIRTIO_RNG(qdev);
-    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+    VirtIORNG *vrng = VIRTIO_RNG(vdev);

     timer_del(vrng->rate_limit_timer);
     timer_free(vrng->rate_limit_timer);
-    unregister_savevm(qdev, "virtio-rng", vrng);
+    unregister_savevm(DEVICE(vdev), "virtio-rng", vrng);
     virtio_cleanup(vdev);
-    return 0;
 }

 static Property virtio_rng_properties[] = {

@@ -211,10 +209,10 @@ static void virtio_rng_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-    dc->exit = virtio_rng_device_exit;
     dc->props = virtio_rng_properties;
     set_bit(DEVICE_CATEGORY_MISC, dc->categories);
     vdc->init = virtio_rng_device_init;
+    vdc->exit = virtio_rng_device_exit;
     vdc->get_features = get_features;
 }
@@ -427,6 +427,12 @@ void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
     unsigned int i;
     hwaddr len;

+    if (num_sg > VIRTQUEUE_MAX_SIZE) {
+        error_report("virtio: map attempt out of bounds: %zd > %d",
+                     num_sg, VIRTQUEUE_MAX_SIZE);
+        exit(1);
+    }
+
     for (i = 0; i < num_sg; i++) {
         len = sg[i].iov_len;
         sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
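The guard added above caps a scatter-gather count that can be influenced by guest-visible state before it sizes any loop or array access on the host. A standalone sketch of that check (illustrative limit and types):

```c
#include <stdio.h>
#include <stdlib.h>

#define VIRTQUEUE_MAX_SIZE 1024

static void map_sg(size_t num_sg)
{
    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        fprintf(stderr, "map attempt out of bounds: %zu > %d\n",
                num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);   /* hostile state must never overrun host buffers */
    }
    for (size_t i = 0; i < num_sg; i++) {
        /* ...map each element... */
    }
    printf("mapped %zu elements\n", num_sg);
}

int main(void)
{
    map_sg(8);        /* fine */
    map_sg(100000);   /* rejected before any out-of-bounds access */
    return 0;
}
```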
@@ -888,7 +894,9 @@ int virtio_set_features(VirtIODevice *vdev, uint32_t val)

 int virtio_load(VirtIODevice *vdev, QEMUFile *f)
 {
-    int num, i, ret;
+    int i, ret;
+    int32_t config_len;
+    uint32_t num;
     uint32_t features;
     uint32_t supported_features;
     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
@@ -903,6 +911,9 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f)
     qemu_get_8s(f, &vdev->status);
     qemu_get_8s(f, &vdev->isr);
     qemu_get_be16s(f, &vdev->queue_sel);
+    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
+        return -1;
+    }
     qemu_get_be32s(f, &features);

     if (virtio_set_features(vdev, features) < 0) {
@@ -911,11 +922,27 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f)
                      features, supported_features);
         return -1;
     }
-    vdev->config_len = qemu_get_be32(f);
-    qemu_get_buffer(f, vdev->config, vdev->config_len);
+    config_len = qemu_get_be32(f);
+
+    /*
+     * There are cases where the incoming config can be bigger or smaller
+     * than what we have; so load what we have space for, and skip
+     * any excess that's in the stream.
+     */
+    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
+
+    while (config_len > vdev->config_len) {
+        qemu_get_byte(f);
+        config_len--;
+    }

     num = qemu_get_be32(f);

+    if (num > VIRTIO_PCI_QUEUE_MAX) {
+        error_report("Invalid number of PCI queues: 0x%x", num);
+        return -1;
+    }
+
     for (i = 0; i < num; i++) {
         vdev->vq[i].vring.num = qemu_get_be32(f);
         if (k->has_variable_vring_alignment) {
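The tolerant config load above copies only the overlap between the on-the-wire config size and the local one, then drains any surplus bytes so the stream stays aligned for the fields that follow. A standalone sketch of that idea (stand-in types and buffer, not QEMU's file API):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static size_t load_config(uint8_t *dst, int32_t local_len,
                          const uint8_t *wire, int32_t wire_len)
{
    memcpy(dst, wire, MIN(wire_len, local_len)); /* load what fits */
    size_t skipped = 0;
    while (wire_len > local_len) {               /* drain the excess */
        wire_len--;
        skipped++;
    }
    return skipped;
}

int main(void)
{
    uint8_t local[8] = {0}, wire[12] = {1, 2, 3};
    printf("skipped %zu surplus bytes\n", load_config(local, 8, wire, 12));
    return 0;
}
```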
@@ -1158,14 +1185,19 @@ static int virtio_device_init(DeviceState *qdev)
     if (k->init(vdev) < 0) {
         return -1;
     }
-    virtio_bus_plug_device(vdev);
+    virtio_bus_device_plugged(vdev);
     return 0;
 }

 static int virtio_device_exit(DeviceState *qdev)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(qdev);

+    virtio_bus_device_unplugged(vdev);
+    if (k->exit) {
+        k->exit(vdev);
+    }
     if (vdev->bus_name) {
         g_free(vdev->bus_name);
         vdev->bus_name = NULL;