mirror of https://github.com/xemu-project/xemu.git
Merge tag 'pull-shadow-2023-09-29' of https://repo.or.cz/qemu/armbru into staging

-Wshadow=local patches patches for 2023-09-29

# -----BEGIN PGP SIGNATURE-----
#
# iQJGBAABCAAwFiEENUvIs9frKmtoZ05fOHC0AOuRhlMFAmUWhnsSHGFybWJydUBy
# ZWRoYXQuY29tAAoJEDhwtADrkYZTDBkP/2E8cyH+fn7yehNAZT8fjBuDBaj0x3wf
# Bs4++bMEZpgfA/11le/Mm+N9BFDtoGj4dnDwQ0yN6bcKcfmNvxh+M+lNaRO+xvXA
# qs/kJtFYkJYuEj1wgKK2XXd4YcD/S4Qap+FSuUBv8KE/oeALkB1fEpvMcwtJtQqc
# 7POQEqYNQfUe+MX/wKZ+qditbbrFRwX69dAd8+nGTbFestXd2uFA5I5kv3ebxELg
# VjTBgQdp7s82iTvoXpTtmQ6A9ba13zmelxmsAMLlAihkbffMwbtbrkQ7qIIUOW1o
# I4WPxhIXXyZbB48qARUq5G3GQuh+7dRArcpYWaFel2a6cjm2Z6NmWJeRAr0cIaWV
# P5B79k7DO551YsBZn+ubH0U+qwMLw+zq2apQ+SeH/loE0pP/c2OBOPtaVI46D0Dh
# 2kgaSuTIy9AByAHoYBxKnxy4TVwPKzk8hdzCQdiRSO7KJdMqMsV+/w1eR4oH9dsf
# CAvJXVzLicFMMABA/4O99K+1yjIOQpwmiqAjc+gV6FdhwllSH3yQDiK4RMWNAwRu
# bRQHBCk143t7cM3ts09T+5QxkWB3U0iGMJ4rpn43yjH5xwlWmpTlztvd7XlXwyTR
# 8j2Z+8qxe992HmVk34rKdkGnu0qz4AhJBgAEEk2e0oepZvjfigqodQwEMCQsse5t
# cH51HzTDuen/
# =XVKC
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 29 Sep 2023 04:10:35 EDT
# gpg:                using RSA key 354BC8B3D7EB2A6B68674E5F3870B400EB918653
# gpg:                issuer "armbru@redhat.com"
# gpg: Good signature from "Markus Armbruster <armbru@redhat.com>" [full]
# gpg:                 aka "Markus Armbruster <armbru@pond.sub.org>" [full]
# Primary key fingerprint: 354B C8B3 D7EB 2A6B 6867 4E5F 3870 B400 EB91 8653

* tag 'pull-shadow-2023-09-29' of https://repo.or.cz/qemu/armbru: (56 commits)
  disas/m68k: clean up local variable shadowing
  hw/nvme: Clean up local variable shadowing in nvme_ns_init()
  softmmu/device_tree: Fixup local variables shadowing
  target/riscv: vector_helper: Fixup local variables shadowing
  target/riscv: cpu: Fixup local variables shadowing
  hw/riscv: opentitan: Fixup local variables shadowing
  qemu-nbd: changes towards enabling -Wshadow=local
  seccomp: avoid shadowing of 'action' variable
  crypto: remove shadowed 'ret' variable
  intel_iommu: Fix shadow local variables on "size"
  aspeed/timer: Clean up local variable shadowing
  aspeed/i3c: Rename variable shadowing a local
  aspeed: Clean up local variable shadowing
  aspeed/i2c: Clean up local variable shadowing
  hw/arm/smmuv3-internal.h: Don't use locals in statement macros
  hw/arm/smmuv3.c: Avoid shadowing variable
  hw/misc/arm_sysctl.c: Avoid shadowing local variable
  hw/intc/arm_gicv3_its: Avoid shadowing variable in do_process_its_cmd()
  hw/acpi: changes towards enabling -Wshadow=local
  test-throttle: don't shadow 'index' variable in do_test_accounting()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 5d7e601df3
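Every hunk in the diff below follows the same pattern flagged by GCC's -Wshadow=local: an inner declaration reuses the name of a variable that already exists in an enclosing scope of the same function. A minimal sketch of the warning and of the usual fixes (the code and the quoted diagnostic are illustrative, not taken from this commit):

/* shadow.c -- hypothetical example; build with: gcc -Wshadow=local -c shadow.c */
int sum_twice(const int *vals, int n)
{
    int i, total = 0;

    for (i = 0; i < n; i++) {
        total += vals[i];
    }
    if (n > 0) {
        int i;  /* "declaration of 'i' shadows a previous local" */

        for (i = 0; i < n; i++) {
            total += vals[i];
        }
    }
    return total;
}

The patches silence such warnings in one of three ways: rename the inner variable (tran becomes aio_ctx_tran, apic_id becomes cpu_apic_id), drop the inner declaration and reuse the enclosing one (the qcow2 and rbd hunks), or shrink the scope by declaring the counter in the for-init clause, as in the first hunk below.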
@@ -207,13 +207,12 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
 {
     PageDesc *pd;
     void **lp;
-    int i;

     /* Level 1. Always allocated. */
     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

     /* Level 2..N-1. */
-    for (i = v_l2_levels; i > 0; i--) {
+    for (int i = v_l2_levels; i > 0; i--) {
         void **p = qatomic_rcu_read(lp);

         if (p == NULL) {

block.c
@@ -3072,18 +3072,19 @@ bdrv_attach_child_common(BlockDriverState *child_bs,
                                           &local_err);

     if (ret < 0 && child_class->change_aio_ctx) {
-        Transaction *tran = tran_new();
+        Transaction *aio_ctx_tran = tran_new();
         GHashTable *visited = g_hash_table_new(NULL, NULL);
         bool ret_child;

         g_hash_table_add(visited, new_child);
         ret_child = child_class->change_aio_ctx(new_child, child_ctx,
-                                                visited, tran, NULL);
+                                                visited, aio_ctx_tran,
+                                                NULL);
         if (ret_child == true) {
             error_free(local_err);
             ret = 0;
         }
-        tran_finalize(tran, ret_child == true ? 0 : -1);
+        tran_finalize(aio_ctx_tran, ret_child == true ? 0 : -1);
         g_hash_table_destroy(visited);
     }

@@ -6208,12 +6209,12 @@ void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
     QLIST_FOREACH(drv, &bdrv_drivers, list) {
         if (drv->format_name) {
             bool found = false;
-            int i = count;

             if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, read_only)) {
                 continue;
             }

+            i = count;
             while (formats && i && !found) {
                 found = !strcmp(formats[--i], drv->format_name);
             }
@@ -258,37 +258,38 @@ void qmp_block_dirty_bitmap_disable(const char *node, const char *name,
     bdrv_disable_dirty_bitmap(bitmap);
 }

-BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
+BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *dst_node,
+                                          const char *dst_bitmap,
                                           BlockDirtyBitmapOrStrList *bms,
                                           HBitmap **backup, Error **errp)
 {
     BlockDriverState *bs;
     BdrvDirtyBitmap *dst, *src;
     BlockDirtyBitmapOrStrList *lst;
+    const char *src_node, *src_bitmap;
     HBitmap *local_backup = NULL;

     GLOBAL_STATE_CODE();

-    dst = block_dirty_bitmap_lookup(node, target, &bs, errp);
+    dst = block_dirty_bitmap_lookup(dst_node, dst_bitmap, &bs, errp);
     if (!dst) {
         return NULL;
     }

     for (lst = bms; lst; lst = lst->next) {
         switch (lst->value->type) {
-            const char *name, *node;
         case QTYPE_QSTRING:
-            name = lst->value->u.local;
-            src = bdrv_find_dirty_bitmap(bs, name);
+            src_bitmap = lst->value->u.local;
+            src = bdrv_find_dirty_bitmap(bs, src_bitmap);
             if (!src) {
-                error_setg(errp, "Dirty bitmap '%s' not found", name);
+                error_setg(errp, "Dirty bitmap '%s' not found", src_bitmap);
                 goto fail;
             }
             break;
         case QTYPE_QDICT:
-            node = lst->value->u.external.node;
-            name = lst->value->u.external.name;
-            src = block_dirty_bitmap_lookup(node, name, NULL, errp);
+            src_node = lst->value->u.external.node;
+            src_bitmap = lst->value->u.external.name;
+            src = block_dirty_bitmap_lookup(src_node, src_bitmap, NULL, errp);
             if (!src) {
                 goto fail;
             }
@@ -1555,7 +1555,6 @@ bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs,
     FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
         const char *name = bdrv_dirty_bitmap_name(bitmap);
         uint32_t granularity = bdrv_dirty_bitmap_granularity(bitmap);
-        Qcow2Bitmap *bm;

         if (!bdrv_dirty_bitmap_get_persistence(bitmap) ||
             bdrv_dirty_bitmap_inconsistent(bitmap)) {

@@ -1625,7 +1624,7 @@ bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs,
     /* allocate clusters and store bitmaps */
     QSIMPLEQ_FOREACH(bm, bm_list, entry) {
-        BdrvDirtyBitmap *bitmap = bm->dirty_bitmap;
+        bitmap = bm->dirty_bitmap;

         if (bitmap == NULL || bdrv_dirty_bitmap_readonly(bitmap)) {
             continue;
@@ -1290,7 +1290,7 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs,
      * operations that exceed the current size.
      */
     if (offset + bytes > s->image_size) {
-        int r = qemu_rbd_resize(bs, offset + bytes);
+        r = qemu_rbd_resize(bs, offset + bytes);
         if (r < 0) {
             return r;
         }
@@ -292,7 +292,6 @@ void stream_start(const char *job_id, BlockDriverState *bs,
     /* Make sure that the image is opened in read-write mode */
     bs_read_only = bdrv_is_read_only(bs);
     if (bs_read_only) {
-        int ret;
         /* Hold the chain during reopen */
         if (bdrv_freeze_backing_chain(bs, above_base, errp) < 0) {
             return;
@@ -634,7 +634,6 @@ vdi_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
     bmap_entry = le32_to_cpu(s->bmap[block_index]);
     if (!VDI_IS_ALLOCATED(bmap_entry)) {
         /* Allocate new block and write to it. */
-        uint64_t data_offset;
         qemu_co_rwlock_upgrade(&s->bmap_lock);
         bmap_entry = le32_to_cpu(s->bmap[block_index]);
         if (VDI_IS_ALLOCATED(bmap_entry)) {

@@ -700,7 +699,7 @@ nonallocating_write:
     /* One or more new blocks were allocated. */
     VdiHeader *header;
     uint8_t *base;
-    uint64_t offset;
+    uint64_t bmap_offset;
     uint32_t n_sectors;

     g_free(block);

@@ -723,11 +722,11 @@ nonallocating_write:
     bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
     bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
     n_sectors = bmap_last - bmap_first + 1;
-    offset = s->bmap_sector + bmap_first;
+    bmap_offset = s->bmap_sector + bmap_first;
     base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
     logout("will write %u block map sectors starting from entry %u\n",
            n_sectors, bmap_first);
-    ret = bdrv_co_pwrite(bs->file, offset * SECTOR_SIZE,
+    ret = bdrv_co_pwrite(bs->file, bmap_offset * SECTOR_SIZE,
                          n_sectors * SECTOR_SIZE, base, 0);
 }
@@ -777,7 +777,6 @@ static int read_directory(BDRVVVFATState* s, int mapping_index)
     while((entry=readdir(dir))) {
         unsigned int length=strlen(dirname)+2+strlen(entry->d_name);
         char* buffer;
-        direntry_t* direntry;
         struct stat st;
         int is_dot=!strcmp(entry->d_name,".");
         int is_dotdot=!strcmp(entry->d_name,"..");

@@ -857,7 +856,7 @@ static int read_directory(BDRVVVFATState* s, int mapping_index)

     /* fill with zeroes up to the end of the cluster */
     while(s->directory.next%(0x10*s->sectors_per_cluster)) {
-        direntry_t* direntry=array_get_next(&(s->directory));
+        direntry = array_get_next(&(s->directory));
         memset(direntry,0,sizeof(direntry_t));
     }

@@ -1962,24 +1961,24 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
                  * This is horribly inefficient, but that is okay, since
                  * it is rarely executed, if at all.
                  */
-                int64_t offset = cluster2sector(s, cluster_num);
+                int64_t offs = cluster2sector(s, cluster_num);

                 vvfat_close_current_file(s);
                 for (i = 0; i < s->sectors_per_cluster; i++) {
                     int res;

                     res = bdrv_is_allocated(s->qcow->bs,
-                                            (offset + i) * BDRV_SECTOR_SIZE,
+                                            (offs + i) * BDRV_SECTOR_SIZE,
                                             BDRV_SECTOR_SIZE, NULL);
                     if (res < 0) {
                         return -1;
                     }
                     if (!res) {
-                        res = vvfat_read(s->bs, offset, s->cluster_buffer, 1);
+                        res = vvfat_read(s->bs, offs, s->cluster_buffer, 1);
                         if (res) {
                             return -1;
                         }
-                        res = bdrv_co_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE,
+                        res = bdrv_co_pwrite(s->qcow, offs * BDRV_SECTOR_SIZE,
                                              BDRV_SECTOR_SIZE, s->cluster_buffer,
                                              0);
                         if (res < 0) {

@@ -2467,8 +2466,9 @@ commit_direntries(BDRVVVFATState* s, int dir_index, int parent_mapping_index)

     for (c = first_cluster; !fat_eof(s, c); c = modified_fat_get(s, c)) {
         direntry_t *first_direntry;
-        void* direntry = array_get(&(s->directory), current_dir_index);
-        int ret = vvfat_read(s->bs, cluster2sector(s, c), direntry,
+
+        direntry = array_get(&(s->directory), current_dir_index);
+        ret = vvfat_read(s->bs, cluster2sector(s, c), (uint8_t *)direntry,
                          s->sectors_per_cluster);
         if (ret)
             return ret;
@@ -2690,12 +2690,12 @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s)
             direntry_t* direntry = array_get(&(s->directory),
                     mapping->info.dir.first_dir_index);
             uint32_t c = mapping->begin;
-            int i = 0;
+            int j = 0;

             /* recurse */
             while (!fat_eof(s, c)) {
                 do {
-                    direntry_t* d = direntry + i;
+                    direntry_t *d = direntry + j;

                     if (is_file(d) || (is_directory(d) && !is_dot(d))) {
                         int l;

@@ -2716,8 +2716,8 @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s)

                         schedule_rename(s, m->begin, new_path);
                     }
-                    i++;
-                } while((i % (0x10 * s->sectors_per_cluster)) != 0);
+                    j++;
+                } while (j % (0x10 * s->sectors_per_cluster) != 0);
                 c = fat_get(s, c);
             }
         }

@@ -2804,16 +2804,16 @@ static int coroutine_fn GRAPH_RDLOCK handle_commits(BDRVVVFATState* s)
             int begin = commit->param.new_file.first_cluster;
             mapping_t* mapping = find_mapping_for_cluster(s, begin);
             direntry_t* entry;
-            int i;
+            int j;

             /* find direntry */
-            for (i = 0; i < s->directory.next; i++) {
-                entry = array_get(&(s->directory), i);
+            for (j = 0; j < s->directory.next; j++) {
+                entry = array_get(&(s->directory), j);
                 if (is_file(entry) && begin_of_direntry(entry) == begin)
                     break;
             }

-            if (i >= s->directory.next) {
+            if (j >= s->directory.next) {
                 fail = -6;
                 continue;
             }

@@ -2833,8 +2833,9 @@ static int coroutine_fn GRAPH_RDLOCK handle_commits(BDRVVVFATState* s)
             mapping->mode = MODE_NORMAL;
             mapping->info.file.offset = 0;

-            if (commit_one_file(s, i, 0))
+            if (commit_one_file(s, j, 0)) {
                 fail = -7;
+            }

             break;
         }
@@ -113,7 +113,7 @@ qcrypto_gnutls_cipher_encrypt(QCryptoCipher *cipher,
     while (len) {
         gnutls_cipher_hd_t handle;
         gnutls_datum_t gkey = { (unsigned char *)ctx->key, ctx->nkey };
-        int err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
+        err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
         if (err != 0) {
             error_setg(errp, "Cannot initialize cipher: %s",
                        gnutls_strerror(err));

@@ -174,7 +174,7 @@ qcrypto_gnutls_cipher_decrypt(QCryptoCipher *cipher,
     while (len) {
         gnutls_cipher_hd_t handle;
         gnutls_datum_t gkey = { (unsigned char *)ctx->key, ctx->nkey };
-        int err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
+        err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
         if (err != 0) {
             error_setg(errp, "Cannot initialize cipher: %s",
                        gnutls_strerror(err));

@@ -52,7 +52,6 @@ GByteArray *qcrypto_tls_cipher_suites_get_data(QCryptoTLSCipherSuites *obj,
     byte_array = g_byte_array_new();

     for (i = 0;; i++) {
-        int ret;
         unsigned idx;
         const char *name;
         IANA_TLS_CIPHER cipher;
@@ -1632,10 +1632,10 @@ print_insn_arg (const char *d,
     case '2':
     case '3':
       {
-        int val = fetch_arg (buffer, place, 5, info);
+        int reg = fetch_arg (buffer, place, 5, info);
         const char *name = 0;

-        switch (val)
+        switch (reg)
           {
           case 2: name = "%tt0"; break;
           case 3: name = "%tt1"; break;

@@ -1655,12 +1655,12 @@ print_insn_arg (const char *d,
              int break_reg = ((buffer[3] >> 2) & 7);

              (*info->fprintf_func)
-               (info->stream, val == 0x1c ? "%%bad%d" : "%%bac%d",
+               (info->stream, reg == 0x1c ? "%%bad%d" : "%%bac%d",
                 break_reg);
            }
            break;
          default:
-           (*info->fprintf_func) (info->stream, "<mmu register %d>", val);
+           (*info->fprintf_func) (info->stream, "<mmu register %d>", reg);
          }
        if (name)
          (*info->fprintf_func) (info->stream, "%s", name);
@@ -265,26 +265,27 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,

     /* build Processor object for each processor */
     for (i = 0; i < apic_ids->len; i++) {
-        int apic_id = apic_ids->cpus[i].arch_id;
+        int cpu_apic_id = apic_ids->cpus[i].arch_id;

-        assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
+        assert(cpu_apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

-        dev = aml_processor(i, 0, 0, "CP%.02X", apic_id);
+        dev = aml_processor(i, 0, 0, "CP%.02X", cpu_apic_id);

         method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
         aml_append(method,
-            aml_return(aml_call2(CPU_MAT_METHOD, aml_int(apic_id), aml_int(i))
+            aml_return(aml_call2(CPU_MAT_METHOD,
+                                 aml_int(cpu_apic_id), aml_int(i))
             ));
         aml_append(dev, method);

         method = aml_method("_STA", 0, AML_NOTSERIALIZED);
         aml_append(method,
-            aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(apic_id))));
+            aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(cpu_apic_id))));
         aml_append(dev, method);

         method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
         aml_append(method,
-            aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(apic_id),
+            aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(cpu_apic_id),
                                  aml_arg(0)))
         );
         aml_append(dev, method);

@@ -298,11 +299,11 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
     /* Arg0 = APIC ID */
     method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
     for (i = 0; i < apic_ids->len; i++) {
-        int apic_id = apic_ids->cpus[i].arch_id;
+        int cpu_apic_id = apic_ids->cpus[i].arch_id;

-        if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(apic_id)));
+        if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(cpu_apic_id)));
         aml_append(if_ctx,
-            aml_notify(aml_name("CP%.02X", apic_id), aml_arg(1))
+            aml_notify(aml_name("CP%.02X", cpu_apic_id), aml_arg(1))
         );
         aml_append(method, if_ctx);
     }

@@ -319,13 +320,13 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
         aml_varpackage(x86ms->apic_id_limit);

     for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
-        int apic_id = apic_ids->cpus[i].arch_id;
+        int cpu_apic_id = apic_ids->cpus[i].arch_id;

-        for (; apic_idx < apic_id; apic_idx++) {
+        for (; apic_idx < cpu_apic_id; apic_idx++) {
             aml_append(pkg, aml_int(0));
         }
         aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
-        apic_idx = apic_id + 1;
+        apic_idx = cpu_apic_id + 1;
     }
     aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
     aml_append(ctx, sb_scope);
@@ -296,10 +296,9 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp)
 {
     const char *r40_nic_models[] = { "gmac", "emac", NULL };
     AwR40State *s = AW_R40(dev);
-    unsigned i;

     /* CPUs */
-    for (i = 0; i < AW_R40_NUM_CPUS; i++) {
+    for (unsigned i = 0; i < AW_R40_NUM_CPUS; i++) {

         /*
          * Disable secondary CPUs. Guest EL3 firmware will start

@@ -335,7 +334,7 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp)
      * maintenance interrupt signal to the appropriate GIC PPI inputs,
      * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
      */
-    for (i = 0; i < AW_R40_NUM_CPUS; i++) {
+    for (unsigned i = 0; i < AW_R40_NUM_CPUS; i++) {
         DeviceState *cpudev = DEVICE(&s->cpus[i]);
         int ppibase = AW_R40_GIC_NUM_SPI + i * GIC_INTERNAL + GIC_NR_SGIS;
         int irq;

@@ -494,7 +493,7 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp)
                        qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_EMAC));

     /* Unimplemented devices */
-    for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) {
+    for (unsigned i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) {
         create_unimplemented_device(r40_unimplemented[i].device_name,
                                     r40_unimplemented[i].base,
                                     r40_unimplemented[i].size);
@@ -1468,7 +1468,6 @@ static void armsse_realize(DeviceState *dev, Error **errp)
     if (info->has_cachectrl) {
         for (i = 0; i < info->num_cpus; i++) {
             char *name = g_strdup_printf("cachectrl%d", i);
-            MemoryRegion *mr;

             qdev_prop_set_string(DEVICE(&s->cachectrl[i]), "name", name);
             g_free(name);

@@ -1484,7 +1483,6 @@ static void armsse_realize(DeviceState *dev, Error **errp)
     if (info->has_cpusecctrl) {
         for (i = 0; i < info->num_cpus; i++) {
             char *name = g_strdup_printf("CPUSECCTRL%d", i);
-            MemoryRegion *mr;

             qdev_prop_set_string(DEVICE(&s->cpusecctrl[i]), "name", name);
             g_free(name);

@@ -1499,7 +1497,6 @@ static void armsse_realize(DeviceState *dev, Error **errp)
     }
     if (info->has_cpuid) {
         for (i = 0; i < info->num_cpus; i++) {
-            MemoryRegion *mr;

             qdev_prop_set_uint32(DEVICE(&s->cpuid[i]), "CPUID", i);
             if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpuid[i]), errp)) {

@@ -1512,7 +1509,6 @@ static void armsse_realize(DeviceState *dev, Error **errp)
     }
     if (info->has_cpu_pwrctrl) {
         for (i = 0; i < info->num_cpus; i++) {
-            MemoryRegion *mr;

             if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu_pwrctrl[i]), errp)) {
                 return;

@@ -1605,7 +1601,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
     /* Wire up the splitters for the MPC IRQs */
     for (i = 0; i < IOTS_NUM_EXP_MPC + info->sram_banks; i++) {
         SplitIRQ *splitter = &s->mpc_irq_splitter[i];
-        DeviceState *dev_splitter = DEVICE(splitter);
+        DeviceState *devs = DEVICE(splitter);

         if (!object_property_set_int(OBJECT(splitter), "num-lines", 2,
                                      errp)) {

@@ -1617,22 +1613,22 @@ static void armsse_realize(DeviceState *dev, Error **errp)

         if (i < IOTS_NUM_EXP_MPC) {
             /* Splitter input is from GPIO input line */
-            s->mpcexp_status_in[i] = qdev_get_gpio_in(dev_splitter, 0);
-            qdev_connect_gpio_out(dev_splitter, 0,
+            s->mpcexp_status_in[i] = qdev_get_gpio_in(devs, 0);
+            qdev_connect_gpio_out(devs, 0,
                                   qdev_get_gpio_in_named(dev_secctl,
                                                          "mpcexp_status", i));
         } else {
             /* Splitter input is from our own MPC */
             qdev_connect_gpio_out_named(DEVICE(&s->mpc[i - IOTS_NUM_EXP_MPC]),
                                         "irq", 0,
-                                        qdev_get_gpio_in(dev_splitter, 0));
-            qdev_connect_gpio_out(dev_splitter, 0,
+                                        qdev_get_gpio_in(devs, 0));
+            qdev_connect_gpio_out(devs, 0,
                                   qdev_get_gpio_in_named(dev_secctl,
                                                          "mpc_status",
                                                          i - IOTS_NUM_EXP_MPC));
         }

-        qdev_connect_gpio_out(dev_splitter, 1,
+        qdev_connect_gpio_out(devs, 1,
                               qdev_get_gpio_in(DEVICE(&s->mpc_irq_orgate), i));
     }
     /* Create GPIO inputs which will pass the line state for our
@@ -517,7 +517,7 @@ static void armv7m_realize(DeviceState *dev, Error **errp)
     for (i = 0; i < ARRAY_SIZE(s->bitband); i++) {
         if (s->enable_bitband) {
             Object *obj = OBJECT(&s->bitband[i]);
-            SysBusDevice *sbd = SYS_BUS_DEVICE(&s->bitband[i]);
+            sbd = SYS_BUS_DEVICE(&s->bitband[i]);

             if (!object_property_set_int(obj, "base",
                                          bitband_input_addr[i], errp)) {
@@ -388,7 +388,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
     aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0,
                     sc->memmap[ASPEED_DEV_TIMER1]);
     for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
-        qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i);
+        irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i);
         sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
     }

@@ -413,8 +413,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
     }
     aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]);
     for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) {
-        qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
-                                        sc->irqmap[ASPEED_DEV_I2C] + i);
+        irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+                               sc->irqmap[ASPEED_DEV_I2C] + i);
         /* The AST2600 I2C controller has one IRQ per bus. */
         sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq);
     }

@@ -611,8 +611,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
     }
     aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]);
     for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) {
-        qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
-                                        sc->irqmap[ASPEED_DEV_I3C] + i);
+        irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+                               sc->irqmap[ASPEED_DEV_I3C] + i);
         /* The AST2600 I3C controller has one IRQ per bus. */
         sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq);
     }
@@ -328,12 +328,9 @@ enum { /* Command completion notification */
 #define CMD_TTL(x) extract32((x)->word[2], 8 , 2)
 #define CMD_TG(x) extract32((x)->word[2], 10, 2)
 #define CMD_STE_RANGE(x) extract32((x)->word[2], 0 , 5)
-#define CMD_ADDR(x) ({                                      \
-            uint64_t high = (uint64_t)(x)->word[3];         \
-            uint64_t low = extract32((x)->word[2], 12, 20); \
-            uint64_t addr = high << 32 | (low << 12);       \
-            addr;                                           \
-        })
+#define CMD_ADDR(x)                             \
+    (((uint64_t)((x)->word[3]) << 32) |         \
+     ((extract64((x)->word[2], 12, 20)) << 12))

 #define SMMU_FEATURE_2LVL_STE (1 << 0)

@@ -533,21 +530,13 @@ typedef struct CD {
 #define STE_S2S(x) extract32((x)->word[5], 25, 1)
 #define STE_S2R(x) extract32((x)->word[5], 26, 1)

-#define STE_CTXPTR(x)                                           \
-    ({                                                          \
-        unsigned long addr;                                     \
-        addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32;  \
-        addr |= (uint64_t)((x)->word[0] & 0xffffffc0);          \
-        addr;                                                   \
-    })
+#define STE_CTXPTR(x)                           \
+    ((extract64((x)->word[1], 0, 16) << 32) |   \
+     ((x)->word[0] & 0xffffffc0))

-#define STE_S2TTB(x)                                            \
-    ({                                                          \
-        unsigned long addr;                                     \
-        addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32;  \
-        addr |= (uint64_t)((x)->word[6] & 0xfffffff0);          \
-        addr;                                                   \
-    })
+#define STE_S2TTB(x)                            \
+    ((extract64((x)->word[7], 0, 16) << 32) |   \
+     ((x)->word[6] & 0xfffffff0))

 static inline int oas2bits(int oas_field)
 {

@@ -585,14 +574,10 @@ static inline int pa_range(STE *ste)

 #define CD_VALID(x) extract32((x)->word[0], 31, 1)
 #define CD_ASID(x) extract32((x)->word[1], 16, 16)
-#define CD_TTB(x, sel)                                          \
-    ({                                                          \
-        uint64_t hi, lo;                                        \
-        hi = extract32((x)->word[(sel) * 2 + 3], 0, 19);        \
-        hi <<= 32;                                              \
-        lo = (x)->word[(sel) * 2 + 2] & ~0xfULL;                \
-        hi | lo;                                                \
-    })
+#define CD_TTB(x, sel)                                          \
+    ((extract64((x)->word[(sel) * 2 + 3], 0, 19) << 32) |       \
+     ((x)->word[(sel) * 2 + 2] & ~0xfULL))
 #define CD_HAD(x, sel) extract32((x)->word[(sel) * 2 + 2], 1, 1)

 #define CD_TSZ(x, sel) extract32((x)->word[0], (16 * (sel)) + 0, 6)
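The three smmuv3-internal.h hunks above replace GNU statement-expression macros with plain expressions. The old form declares its own local ('addr', or 'hi'/'lo') inside ({ ... }), so any call site that already has a variable of that name trips -Wshadow=local. A hedged sketch of the pattern and of the expression-only rewrite (the macro, field layout, and names here are invented for illustration, not the real SMMU definitions):

#include <stdint.h>

/* Old style: the macro hides a local behind a statement expression,
 * which can shadow a same-named variable at the expansion site. */
#define FIELD_ADDR_OLD(w) ({                        \
        uint64_t addr = (uint64_t)(w)[1] << 32;     \
        addr |= (w)[0] & ~0xfffu;                   \
        addr;                                       \
    })

/* New style: a single expression, no hidden locals, same value. */
#define FIELD_ADDR_NEW(w) \
    (((uint64_t)(w)[1] << 32) | ((w)[0] & ~0xfffu))

The expression form has the side benefit of being usable in contexts where GCC statement expressions are not, such as initialisers outside function bodies.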
@@ -1040,8 +1040,8 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
     SMMUv3State *s = sdev->smmu;

     if (!tg) {
-        SMMUEventInfo event = {.inval_ste_allowed = true};
-        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
+        SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
+        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
         SMMUTransTableInfo *tt;

         if (!cfg) {
@@ -801,7 +801,6 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
     for (i = 0; i < smp_cpus; i++) {
         DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
         int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
-        int irq;
         /* Mapping from the output timer irq lines from the CPU to the
          * GIC PPI inputs we use for the virt board.
          */

@@ -812,7 +811,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
             [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
         };

-        for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
+        for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
             qdev_connect_gpio_out(cpudev, irq,
                                   qdev_get_gpio_in(vms->gic,
                                                    ppibase + timer_irq[irq]));
@@ -369,7 +369,7 @@ static void xen_block_get_vdev(Object *obj, Visitor *v, const char *name,
     case XEN_BLOCK_VDEV_TYPE_XVD:
     case XEN_BLOCK_VDEV_TYPE_HD:
     case XEN_BLOCK_VDEV_TYPE_SD: {
-        char *name = disk_to_vbd_name(vdev->disk);
+        char *vbd_name = disk_to_vbd_name(vdev->disk);

         str = g_strdup_printf("%s%s%lu",
                               (vdev->type == XEN_BLOCK_VDEV_TYPE_XVD) ?

@@ -377,8 +377,8 @@ static void xen_block_get_vdev(Object *obj, Visitor *v, const char *name,
                               (vdev->type == XEN_BLOCK_VDEV_TYPE_HD) ?
                               "hd" :
                               "sd",
-                              name, vdev->partition);
-        g_free(name);
+                              vbd_name, vdev->partition);
+        g_free(vbd_name);
         break;
     }
     default:
@@ -1082,8 +1082,6 @@ static void machine_initfn(Object *obj)
     ms->maxram_size = mc->default_ram_size;

     if (mc->nvdimm_supported) {
-        Object *obj = OBJECT(ms);
-
         ms->nvdimms_state = g_new0(NVDIMMState, 1);
         object_property_add_bool(obj, "nvdimm",
                                  machine_get_nvdimm, machine_set_nvdimm);
@@ -312,7 +312,6 @@ static void aspeed_i2c_bus_recv(AspeedI2CBus *bus)
         SHARED_ARRAY_FIELD_DP32(bus->regs, reg_pool_ctrl, RX_COUNT, i & 0xff);
         SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, RX_BUFF_EN, 0);
     } else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_DMA_EN)) {
-        uint8_t data;
         /* In new mode, clear how many bytes we RXed */
         if (aspeed_i2c_is_new_mode(bus->controller)) {
             ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN_STS, RX_LEN, 0);
@@ -1585,12 +1585,12 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
             aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
             aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
             if (pci_bus_is_cxl(bus)) {
-                struct Aml *pkg = aml_package(2);
+                struct Aml *aml_pkg = aml_package(2);

                 aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0016")));
-                aml_append(pkg, aml_eisaid("PNP0A08"));
-                aml_append(pkg, aml_eisaid("PNP0A03"));
-                aml_append(dev, aml_name_decl("_CID", pkg));
+                aml_append(aml_pkg, aml_eisaid("PNP0A08"));
+                aml_append(aml_pkg, aml_eisaid("PNP0A03"));
+                aml_append(dev, aml_name_decl("_CID", aml_pkg));
                 build_cxl_osc_method(dev);
             } else if (pci_bus_is_express(bus)) {
                 aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));

@@ -1783,14 +1783,14 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
         Object *pci_host = acpi_get_i386_pci_host();

         if (pci_host) {
-            PCIBus *bus = PCI_HOST_BRIDGE(pci_host)->bus;
-            Aml *scope = aml_scope("PCI0");
+            PCIBus *pbus = PCI_HOST_BRIDGE(pci_host)->bus;
+            Aml *ascope = aml_scope("PCI0");
             /* Scan all PCI buses. Generate tables to support hotplug. */
-            build_append_pci_bus_devices(scope, bus);
-            if (object_property_find(OBJECT(bus), ACPI_PCIHP_PROP_BSEL)) {
-                build_append_pcihp_slots(scope, bus);
+            build_append_pci_bus_devices(ascope, pbus);
+            if (object_property_find(OBJECT(pbus), ACPI_PCIHP_PROP_BSEL)) {
+                build_append_pcihp_slots(ascope, pbus);
             }
-            aml_append(sb_scope, scope);
+            aml_append(sb_scope, ascope);
         }
     }

@@ -1842,10 +1842,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
         bool has_pcnt;

         Object *pci_host = acpi_get_i386_pci_host();
-        PCIBus *bus = PCI_HOST_BRIDGE(pci_host)->bus;
+        PCIBus *b = PCI_HOST_BRIDGE(pci_host)->bus;

         scope = aml_scope("\\_SB.PCI0");
-        has_pcnt = build_append_notfication_callback(scope, bus);
+        has_pcnt = build_append_notfication_callback(scope, b);
         if (has_pcnt) {
             aml_append(dsdt, scope);
         }
@@ -3744,7 +3744,7 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
 /* Unmap the whole range in the notifier's scope. */
 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
 {
-    hwaddr size, remain;
+    hwaddr total, remain;
     hwaddr start = n->start;
     hwaddr end = n->end;
     IntelIOMMUState *s = as->iommu_state;

@@ -3765,7 +3765,7 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
     }

     assert(start <= end);
-    size = remain = end - start + 1;
+    total = remain = end - start + 1;

     while (remain >= VTD_PAGE_SIZE) {
         IOMMUTLBEvent event;

@@ -3793,10 +3793,10 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
     trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                              VTD_PCI_SLOT(as->devfn),
                              VTD_PCI_FUNC(as->devfn),
-                             n->start, size);
+                             n->start, total);

     map.iova = n->start;
-    map.size = size - 1; /* Inclusive */
+    map.size = total - 1; /* Inclusive */
     iova_tree_remove(as->iova_tree, map);
 }
@@ -545,10 +545,10 @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
     }

     if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) {
-        ITEntry ite = {};
+        ITEntry i = {};
         /* remove mapping from interrupt translation table */
-        ite.valid = false;
-        return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
+        i.valid = false;
+        return update_ite(s, eventid, &dte, &i) ? CMD_CONTINUE_OK : CMD_STALL;
     }
     return CMD_CONTINUE_OK;
 }
@@ -610,11 +610,8 @@ static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
     case 0x10B0:
     case 0x10C0:
     case 0x10D0:
-        {
-            int idx;
-            idx = (addr - 0x10A0) >> 4;
-            write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
-        }
+        idx = (addr - 0x10A0) >> 4;
+        write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
         break;
     case 0x10E0: /* SPVE */
         opp->spve = val & opp->vector_mask;
@@ -44,15 +44,14 @@

 #define BOOTINFOSTR(base, id, string)                                 \
     do {                                                              \
-        int i;                                                        \
         stw_p(base, id);                                              \
         base += 2;                                                    \
         stw_p(base,                                                   \
               (sizeof(struct bi_record) + strlen(string) +            \
                1 /* null termination */ + 3 /* padding */) & ~3);     \
         base += 2;                                                    \
-        for (i = 0; string[i]; i++) {                                 \
-            stb_p(base++, string[i]);                                 \
+        for (unsigned i_ = 0; string[i_]; i_++) {                     \
+            stb_p(base++, string[i_]);                                \
         }                                                             \
         stb_p(base++, 0);                                             \
         base = QEMU_ALIGN_PTR_UP(base, 4);                            \

@@ -60,7 +59,6 @@

 #define BOOTINFODATA(base, id, data, len)                             \
     do {                                                              \
-        int i;                                                        \
         stw_p(base, id);                                              \
         base += 2;                                                    \
         stw_p(base,                                                   \

@@ -69,8 +67,8 @@
         base += 2;                                                    \
         stw_p(base, len);                                             \
         base += 2;                                                    \
-        for (i = 0; i < len; ++i) {                                   \
-            stb_p(base++, data[i]);                                   \
+        for (unsigned i_ = 0; i_ < len; ++i_) {                       \
+            stb_p(base++, data[i_]);                                  \
         }                                                             \
         base = QEMU_ALIGN_PTR_UP(base, 4);                            \
     } while (0)
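The bootinfo macros show the same problem in do { ... } while (0) form: the macro used to declare its own 'int i;', shadowing whatever 'i' the expanding function had in scope. The fix moves the counter into the for-init clause under a name callers are unlikely to use. A simplified sketch (not the real BOOTINFO* definitions):

/* Before: every expansion introduces an 'i' that can shadow a caller's 'i'. */
#define COPY_BYTES_OLD(dst, src, len)       \
    do {                                    \
        int i;                              \
        for (i = 0; i < (len); i++) {       \
            (dst)[i] = (src)[i];            \
        }                                   \
    } while (0)

/* After: the counter exists only inside the for statement and uses a
 * name that is unlikely to collide with a caller variable. */
#define COPY_BYTES_NEW(dst, src, len)                 \
    do {                                              \
        for (unsigned i_ = 0; i_ < (len); i_++) {     \
            (dst)[i_] = (src)[i_];                    \
        }                                             \
    } while (0)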
@@ -183,7 +183,7 @@ petalogix_ml605_init(MachineState *machine)
     spi = (SSIBus *)qdev_get_child_bus(dev, "spi");

     for (i = 0; i < NUM_SPI_FLASHES; i++) {
-        DriveInfo *dinfo = drive_get(IF_MTD, 0, i);
+        dinfo = drive_get(IF_MTD, 0, i);
         qemu_irq cs_line;

         dev = qdev_new("n25q128");
@@ -534,12 +534,12 @@ static void arm_sysctl_write(void *opaque, hwaddr offset,
                 s->sys_cfgstat |= 2; /* error */
             }
         } else {
-            uint32_t val;
+            uint32_t data;
             if (!vexpress_cfgctrl_read(s, dcc, function, site, position,
-                                       device, &val)) {
+                                       device, &data)) {
                 s->sys_cfgstat |= 2; /* error */
             } else {
-                s->sys_cfgdata = val;
+                s->sys_cfgdata = data;
             }
         }
     }
@@ -296,13 +296,13 @@ static void aspeed_i3c_realize(DeviceState *dev, Error **errp)
     memory_region_add_subregion(&s->iomem_container, 0x0, &s->iomem);

     for (i = 0; i < ASPEED_I3C_NR_DEVICES; ++i) {
-        Object *dev = OBJECT(&s->devices[i]);
+        Object *i3c_dev = OBJECT(&s->devices[i]);

-        if (!object_property_set_uint(dev, "device-id", i, errp)) {
+        if (!object_property_set_uint(i3c_dev, "device-id", i, errp)) {
             return;
         }

-        if (!sysbus_realize(SYS_BUS_DEVICE(dev), errp)) {
+        if (!sysbus_realize(SYS_BUS_DEVICE(i3c_dev), errp)) {
             return;
         }
@@ -98,7 +98,7 @@ static void nios2_10m50_ghrd_init(MachineState *machine)
     qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal);

     if (nms->vic) {
-        DeviceState *dev = qdev_new(TYPE_NIOS2_VIC);
+        dev = qdev_new(TYPE_NIOS2_VIC);
         MemoryRegion *dev_mr;
         qemu_irq cpu_irq;

@@ -107,7 +107,7 @@ static void nios2_10m50_ghrd_init(MachineState *machine)

         cpu_irq = qdev_get_gpio_in_named(DEVICE(cpu), "EIC", 0);
         sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq);
-        for (int i = 0; i < 32; i++) {
+        for (i = 0; i < 32; i++) {
             irq[i] = qdev_get_gpio_in(dev, i);
         }
@@ -107,7 +107,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)

     ns->pif = ns->params.pif;

-    static const NvmeLBAF lbaf[16] = {
+    static const NvmeLBAF defaults[16] = {
         [0] = { .ds = 9 },
         [1] = { .ds = 9, .ms = 8 },
         [2] = { .ds = 9, .ms = 16 },

@@ -120,7 +120,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)

     ns->nlbaf = 8;

-    memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf));
+    memcpy(&id_ns->lbaf, &defaults, sizeof(defaults));

     for (i = 0; i < ns->nlbaf; i++) {
         NvmeLBAF *lbaf = &id_ns->lbaf[i];
@@ -738,8 +738,9 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr,
         }
     } else {
         if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) {
-            hwaddr addr = val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K);
-            memory_region_add_subregion(sysmem, addr,
+            hwaddr esb_addr =
+                val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K);
+            memory_region_add_subregion(sysmem, esb_addr,
                                         &psi9->source.esb_mmio);
         }
     }
@ -780,6 +780,26 @@ static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
|
||||||
pcc->lrg_decr_bits)));
|
pcc->lrg_decr_bits)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void spapr_dt_one_cpu(void *fdt, SpaprMachineState *spapr, CPUState *cs,
|
||||||
|
int cpus_offset)
|
||||||
|
{
|
||||||
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||||
|
int index = spapr_get_vcpu_id(cpu);
|
||||||
|
DeviceClass *dc = DEVICE_GET_CLASS(cs);
|
||||||
|
g_autofree char *nodename = NULL;
|
||||||
|
int offset;
|
||||||
|
|
||||||
|
if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
|
||||||
|
offset = fdt_add_subnode(fdt, cpus_offset, nodename);
|
||||||
|
_FDT(offset);
|
||||||
|
spapr_dt_cpu(cs, fdt, offset, spapr);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
|
static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
|
||||||
{
|
{
|
||||||
CPUState **rev;
|
CPUState **rev;
|
||||||
|
@ -809,21 +829,7 @@ static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = n_cpus - 1; i >= 0; i--) {
|
for (i = n_cpus - 1; i >= 0; i--) {
|
||||||
CPUState *cs = rev[i];
|
spapr_dt_one_cpu(fdt, spapr, rev[i], cpus_offset);
|
||||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
|
||||||
int index = spapr_get_vcpu_id(cpu);
|
|
||||||
DeviceClass *dc = DEVICE_GET_CLASS(cs);
|
|
||||||
g_autofree char *nodename = NULL;
|
|
||||||
int offset;
|
|
||||||
|
|
||||||
if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
|
|
||||||
offset = fdt_add_subnode(fdt, cpus_offset, nodename);
|
|
||||||
_FDT(offset);
|
|
||||||
spapr_dt_cpu(cs, fdt, offset, spapr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
g_free(rev);
|
g_free(rev);
|
||||||
|
@@ -2659,8 +2665,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr)
     }
 
     if (smc->pre_2_10_has_unused_icps) {
-        int i;
-
         for (i = 0; i < spapr_max_server_number(spapr); i++) {
             /* Dummy entries get deregistered when real ICPState objects
              * are registered during CPU core hotplug.
@@ -3210,8 +3214,8 @@ static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
 
     if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
         /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
-        PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
-        return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
+        PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
+        return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
     }
 
     if (pcidev) {
@@ -341,7 +341,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
     fdt_depth = 0;
 
     do {
-        const char *name = NULL;
+        const char *dt_name = NULL;
         const struct fdt_property *prop = NULL;
         int prop_len = 0, name_len = 0;
         uint32_t tag;
@@ -351,8 +351,8 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
         switch (tag) {
         case FDT_BEGIN_NODE:
             fdt_depth++;
-            name = fdt_get_name(fdt, fdt_offset, &name_len);
-            if (!visit_start_struct(v, name, NULL, 0, errp)) {
+            dt_name = fdt_get_name(fdt, fdt_offset, &name_len);
+            if (!visit_start_struct(v, dt_name, NULL, 0, errp)) {
                 return;
             }
             break;
@@ -369,8 +369,8 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
         case FDT_PROP: {
            int i;
            prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len);
-           name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
-           if (!visit_start_list(v, name, NULL, 0, errp)) {
+           dt_name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+           if (!visit_start_list(v, dt_name, NULL, 0, errp)) {
                return;
            }
            for (i = 0; i < prop_len; i++) {
@@ -1237,8 +1237,6 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
         case FDT_END_NODE:
             drc->ccs_depth--;
             if (drc->ccs_depth == 0) {
-                uint32_t drc_index = spapr_drc_index(drc);
-
                 /* done sending the device tree, move to configured state */
                 trace_spapr_drc_set_configured(drc_index);
                 drc->state = drck->ready_state;
@@ -1826,9 +1826,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
         (SpaprMachineState *) object_dynamic_cast(qdev_get_machine(),
                                                   TYPE_SPAPR_MACHINE);
     SpaprMachineClass *smc = spapr ? SPAPR_MACHINE_GET_CLASS(spapr) : NULL;
-    SysBusDevice *s = SYS_BUS_DEVICE(dev);
-    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
-    PCIHostState *phb = PCI_HOST_BRIDGE(s);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(sbd);
+    PCIHostState *phb = PCI_HOST_BRIDGE(sbd);
     MachineState *ms = MACHINE(spapr);
     char *namebuf;
     int i;
@@ -227,7 +227,7 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
                                            IRQ_M_TIMER));
 
     /* SPI-Hosts */
-    for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) {
+    for (i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) {
         dev = DEVICE(&(s->spi_host[i]));
         if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi_host[i]), errp)) {
             return;
@@ -1423,13 +1423,14 @@ void smbios_entry_add(QemuOpts *opts, Error **errp)
         if (!qemu_opts_validate(opts, qemu_smbios_type8_opts, errp)) {
             return;
         }
-        struct type8_instance *t;
-        t = g_new0(struct type8_instance, 1);
-        save_opt(&t->internal_reference, opts, "internal_reference");
-        save_opt(&t->external_reference, opts, "external_reference");
-        t->connector_type = qemu_opt_get_number(opts, "connector_type", 0);
-        t->port_type = qemu_opt_get_number(opts, "port_type", 0);
-        QTAILQ_INSERT_TAIL(&type8, t, next);
+        struct type8_instance *t8_i;
+        t8_i = g_new0(struct type8_instance, 1);
+        save_opt(&t8_i->internal_reference, opts, "internal_reference");
+        save_opt(&t8_i->external_reference, opts, "external_reference");
+        t8_i->connector_type = qemu_opt_get_number(opts,
+                                                   "connector_type", 0);
+        t8_i->port_type = qemu_opt_get_number(opts, "port_type", 0);
+        QTAILQ_INSERT_TAIL(&type8, t8_i, next);
         return;
     case 11:
         if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) {
@@ -1452,27 +1453,27 @@ void smbios_entry_add(QemuOpts *opts, Error **errp)
         type17.speed = qemu_opt_get_number(opts, "speed", 0);
         return;
     case 41: {
-        struct type41_instance *t;
+        struct type41_instance *t41_i;
         Error *local_err = NULL;
 
         if (!qemu_opts_validate(opts, qemu_smbios_type41_opts, errp)) {
             return;
         }
-        t = g_new0(struct type41_instance, 1);
-        save_opt(&t->designation, opts, "designation");
-        t->kind = qapi_enum_parse(&type41_kind_lookup,
-                                  qemu_opt_get(opts, "kind"),
-                                  0, &local_err) + 1;
-        t->kind |= 0x80; /* enabled */
+        t41_i = g_new0(struct type41_instance, 1);
+        save_opt(&t41_i->designation, opts, "designation");
+        t41_i->kind = qapi_enum_parse(&type41_kind_lookup,
                                      qemu_opt_get(opts, "kind"),
                                      0, &local_err) + 1;
+        t41_i->kind |= 0x80; /* enabled */
         if (local_err != NULL) {
             error_propagate(errp, local_err);
-            g_free(t);
+            g_free(t41_i);
             return;
         }
-        t->instance = qemu_opt_get_number(opts, "instance", 1);
-        save_opt(&t->pcidev, opts, "pcidev");
+        t41_i->instance = qemu_opt_get_number(opts, "instance", 1);
+        save_opt(&t41_i->pcidev, opts, "pcidev");
 
-        QTAILQ_INSERT_TAIL(&type41, t, next);
+        QTAILQ_INSERT_TAIL(&type41, t41_i, next);
         return;
     }
     default:
@@ -167,7 +167,7 @@ static uint64_t calculate_next(struct AspeedTimer *t)
         qemu_set_irq(t->irq, t->level);
     }
 
-    next = MAX(MAX(calculate_match(t, 0), calculate_match(t, 1)), 0);
+    next = MAX(calculate_match(t, 0), calculate_match(t, 1));
     t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 
     return calculate_time(t, next);
@@ -15,10 +15,10 @@
 
 #define _FDT(exp) \
     do { \
-        int ret = (exp); \
-        if (ret < 0) { \
+        int _ret = (exp); \
+        if (_ret < 0) { \
             error_report("error creating device tree: %s: %s", \
-                         #exp, fdt_strerror(ret)); \
+                         #exp, fdt_strerror(_ret)); \
             exit(1); \
         } \
     } while (0)
@@ -45,10 +45,16 @@ struct QObject {
     struct QObjectBase_ base;
 };
 
-#define QOBJECT(obj) ({ \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define QOBJECT_INTERNAL(obj, _obj) ({ \
     typeof(obj) _obj = (obj); \
-    _obj ? container_of(&(_obj)->base, QObject, base) : NULL; \
+    _obj ? container_of(&_obj->base, QObject, base) : NULL; \
 })
+#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTFIER(_obj))
 
 /* Required for qobject_to() */
 #define QTYPE_CAST_TO_QNull QTYPE_QNULL
@@ -157,13 +157,20 @@
     smp_read_barrier_depends();
 #endif
 
-#define qatomic_rcu_read(ptr) \
-    ({ \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define qatomic_rcu_read_internal(ptr, _val) \
+    ({ \
     qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
     typeof_strip_qual(*ptr) _val; \
     qatomic_rcu_read__nocheck(ptr, &_val); \
     _val; \
     })
+#define qatomic_rcu_read(ptr) \
+    qatomic_rcu_read_internal((ptr), MAKE_IDENTFIER(_val))
 
 #define qatomic_rcu_set(ptr, i) do { \
     qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
@@ -37,6 +37,9 @@
 #define tostring(s) #s
 #endif
 
+/* Expands into an identifier stemN, where N is another number each time */
+#define MAKE_IDENTFIER(stem) glue(stem, __COUNTER__)
+
 #ifndef likely
 #define likely(x)   __builtin_expect(!!(x), 1)
 #define unlikely(x)   __builtin_expect(!!(x), 0)
@@ -383,19 +383,28 @@ void QEMU_ERROR("code path is reachable")
  * determined by the pre-processor instead of the compiler, you'll
  * have to open-code it.  Sadly, Coverity is severely confused by the
  * constant variants, so we have to dumb things down there.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
  */
-#undef MIN
-#define MIN(a, b) \
+#define MIN_INTERNAL(a, b, _a, _b) \
     ({ \
         typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
         _a < _b ? _a : _b; \
     })
-#undef MAX
-#define MAX(a, b) \
+#undef MIN
+#define MIN(a, b) \
+    MIN_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+
+#define MAX_INTERNAL(a, b, _a, _b) \
     ({ \
         typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
         _a > _b ? _a : _b; \
     })
+#undef MAX
+#define MAX(a, b) \
+    MAX_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
 
 #ifdef __COVERITY__
 # define MIN_CONST(a, b) ((a) < (b) ? (a) : (b))
@@ -416,14 +425,18 @@ void QEMU_ERROR("code path is reachable")
 /*
  * Minimum function that returns zero only if both values are zero.
  * Intended for use with unsigned values only.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
  */
-#ifndef MIN_NON_ZERO
-#define MIN_NON_ZERO(a, b) \
+#define MIN_NON_ZERO_INTERNAL(a, b, _a, _b) \
     ({ \
         typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
         _a == 0 ? _b : (_b == 0 || _b > _a) ? _a : _b; \
     })
-#endif
+#define MIN_NON_ZERO(a, b) \
+    MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
 
 /*
  * Round number down to multiple. Safe when m is not a power of 2 (see
@@ -126,10 +126,8 @@ int qemu_fdt_add_path(void *fdt, const char *path);
 #define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
     do { \
         uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
-        int i; \
-        \
-        for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \
-            qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \
+        for (unsigned i_ = 0; i_ < ARRAY_SIZE(qdt_tmp); i_++) { \
+            qdt_tmp[i_] = cpu_to_be32(qdt_tmp[i_]); \
         } \
         qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
                          sizeof(qdt_tmp)); \
@@ -367,7 +367,6 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last)
     switch (sa_family) {
     case AF_UNIX: {
         struct target_sockaddr_un *un = (struct target_sockaddr_un *)sa;
-        int i;
         qemu_log("{sun_family=AF_UNIX,sun_path=\"");
         for (i = 0; i < addrlen -
                     offsetof(struct target_sockaddr_un, sun_path) &&
@@ -440,8 +440,8 @@ static int init_blk_migration(QEMUFile *f)
     /* Can only insert new BDSes now because doing so while iterating block
      * devices may end up in a deadlock (iterating the new BDSes, too). */
     for (i = 0; i < num_bs; i++) {
-        BlkMigDevState *bmds = bmds_bs[i].bmds;
-        BlockDriverState *bs = bmds_bs[i].bs;
+        bmds = bmds_bs[i].bmds;
+        bs = bmds_bs[i].bs;
 
         if (bmds) {
             ret = blk_insert_bs(bmds->blk, bs, &local_err);
@@ -3517,8 +3517,6 @@ int colo_init_ram_cache(void)
      * we use the same name 'ram_bitmap' as for migration.
      */
     if (ram_bytes_total()) {
-        RAMBlock *block;
-
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
             block->bmap = bitmap_new(pages);
@@ -3998,12 +3996,12 @@ static int ram_load_precopy(QEMUFile *f)
                     }
                 }
                 if (migrate_ignore_shared()) {
-                    hwaddr addr = qemu_get_be64(f);
+                    hwaddr addr2 = qemu_get_be64(f);
                     if (migrate_ram_is_ignored(block) &&
-                        block->mr->addr != addr) {
+                        block->mr->addr != addr2) {
                         error_report("Mismatched GPAs for block %s "
                                      "%" PRId64 "!= %" PRId64,
-                                     id, (uint64_t)addr,
+                                     id, (uint64_t)addr2,
                                      (uint64_t)block->mr->addr);
                         ret = -EINVAL;
                     }
@@ -1902,9 +1902,11 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
      * by waiting for a READY message.
      */
     if (rdma->control_ready_expected) {
-        RDMAControlHeader resp;
-        ret = qemu_rdma_exchange_get_response(rdma,
-                    &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
+        RDMAControlHeader resp_ignored;
+
+        ret = qemu_rdma_exchange_get_response(rdma, &resp_ignored,
+                                              RDMA_CONTROL_READY,
+                                              RDMA_WRID_READY);
         if (ret < 0) {
             return ret;
         }
@@ -3282,7 +3284,8 @@ static size_t qemu_rdma_save_page(QEMUFile *f,
          */
         while (1) {
             uint64_t wr_id, wr_id_in;
-            int ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL);
+            ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL);
 
             if (ret < 0) {
                 error_report("rdma migration: polling error! %d", ret);
                 goto err;
@@ -3297,7 +3300,8 @@ static size_t qemu_rdma_save_page(QEMUFile *f,
 
         while (1) {
             uint64_t wr_id, wr_id_in;
-            int ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL);
+            ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL);
 
             if (ret < 0) {
                 error_report("rdma migration: polling error! %d", ret);
                 goto err;
@@ -97,7 +97,7 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
         return -EINVAL;
     }
     if (vmsd->pre_load) {
-        int ret = vmsd->pre_load(opaque);
+        ret = vmsd->pre_load(opaque);
         if (ret) {
             return ret;
         }
@@ -432,8 +432,6 @@ _eth_get_rss_ex_src_addr(const struct iovec *pkt, int pkt_frags,
         }
 
         if (opthdr.type == IP6_OPT_HOME) {
-            size_t input_size = iov_size(pkt, pkt_frags);
-
             if (input_size < opt_offset + sizeof(opthdr)) {
                 return false;
             }
@@ -939,7 +939,6 @@ int main(int argc, char **argv)
     g_autoptr(GError) err = NULL;
     int stderr_fd[2];
     pid_t pid;
-    int ret;
 
     if (!g_unix_open_pipe(stderr_fd, FD_CLOEXEC, &err)) {
         error_report("Error setting up communication pipe: %s",
@@ -1172,7 +1171,6 @@ int main(int argc, char **argv)
 
     if (opts.device) {
 #if HAVE_NBD_DEVICE
-        int ret;
         ret = pthread_create(&client_thread, NULL, nbd_client_thread, &opts);
         if (ret != 0) {
             error_report("Failed to create client thread: %s", strerror(ret));
@@ -1219,9 +1217,10 @@ int main(int argc, char **argv)
     qemu_opts_del(sn_opts);
 
     if (opts.device) {
-        void *ret;
-        pthread_join(client_thread, &ret);
-        exit(ret != NULL);
+        void *result;
+        pthread_join(client_thread, &result);
+        ret = (intptr_t)result;
+        exit(ret);
     } else {
         exit(EXIT_SUCCESS);
     }
@@ -418,9 +418,9 @@ int qemu_fdt_setprop_string_array(void *fdt, const char *node_path,
     }
     p = str = g_malloc0(total_len);
     for (i = 0; i < len; i++) {
-        int len = strlen(array[i]) + 1;
-        pstrcpy(p, len, array[i]);
-        p += len;
+        int offset = strlen(array[i]) + 1;
+        pstrcpy(p, offset, array[i]);
+        p += offset;
     }
 
     ret = qemu_fdt_setprop(fdt, node_path, prop, str, total_len);
@@ -3245,7 +3245,6 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
     }
 
     if (mr->alias) {
-        MemoryRegionList *ml;
         bool found = false;
 
         /* check if the alias is already in the queue */
@@ -913,16 +913,16 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
 
         while (page < end) {
             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+            unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
             unsigned long num = MIN(end - page,
-                                    DIRTY_MEMORY_BLOCK_SIZE - offset);
+                                    DIRTY_MEMORY_BLOCK_SIZE - ofs);
 
-            assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
+            assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
             assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
-            offset >>= BITS_PER_LEVEL;
+            ofs >>= BITS_PER_LEVEL;
 
             bitmap_copy_and_clear_atomic(snap->dirty + dest,
-                                         blocks->blocks[idx] + offset,
+                                         blocks->blocks[idx] + ofs,
                                          num);
             page += num;
             dest += num >> BITS_PER_LEVEL;
@@ -283,9 +283,9 @@ static uint32_t qemu_seccomp_update_action(uint32_t action)
     if (action == SCMP_ACT_TRAP) {
         static int kill_process = -1;
         if (kill_process == -1) {
-            uint32_t action = SECCOMP_RET_KILL_PROCESS;
+            uint32_t testaction = SECCOMP_RET_KILL_PROCESS;
 
-            if (qemu_seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0) {
+            if (qemu_seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &testaction) == 0) {
                 kill_process = 1;
             } else {
                 kill_process = 0;
@@ -1934,16 +1934,16 @@ int hvf_vcpu_exec(CPUState *cpu)
         uint32_t rt = (syndrome >> 5) & 0x1f;
         uint32_t reg = syndrome & SYSREG_MASK;
         uint64_t val;
-        int ret = 0;
+        int sysreg_ret = 0;
 
         if (isread) {
-            ret = hvf_sysreg_read(cpu, reg, rt);
+            sysreg_ret = hvf_sysreg_read(cpu, reg, rt);
         } else {
             val = hvf_get_reg(cpu, rt);
-            ret = hvf_sysreg_write(cpu, reg, val);
+            sysreg_ret = hvf_sysreg_write(cpu, reg, val);
         }
 
-        advance_pc = !ret;
+        advance_pc = !sysreg_ret;
         break;
     }
     case EC_WFX_TRAP:
@@ -925,8 +925,8 @@ DO_1OP_IMM(vorri, DO_ORRI)
         bool qc = false; \
         for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
             bool sat = false; \
-            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
-            mergemask(&d[H##ESIZE(e)], r, mask); \
+            TYPE r_ = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
+            mergemask(&d[H##ESIZE(e)], r_, mask); \
             qc |= sat & mask & 1; \
         } \
         if (qc) { \
@@ -1250,11 +1250,11 @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
 #define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
     ({ \
         uint32_t su32 = 0; \
-        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
+        typeof(N) qrshl_ret = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
         if (su32) { \
             *satp = true; \
         } \
-        r; \
+        qrshl_ret; \
     })
 
 #define DO_SQSHL_OP(N, M, satp) \
@@ -1292,12 +1292,12 @@ DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)
         for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
             bool sat = false; \
             if ((e & 1) == XCHG) { \
-                TYPE r = FN(n[H##ESIZE(e)], \
+                TYPE vqdmladh_ret = FN(n[H##ESIZE(e)], \
                             m[H##ESIZE(e - XCHG)], \
                             n[H##ESIZE(e + (1 - 2 * XCHG))], \
                             m[H##ESIZE(e + (1 - XCHG))], \
                             ROUND, &sat); \
-                mergemask(&d[H##ESIZE(e)], r, mask); \
+                mergemask(&d[H##ESIZE(e)], vqdmladh_ret, mask); \
                 qc |= sat & mask & 1; \
             } \
         } \
@@ -2454,7 +2454,7 @@ static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
             return extval;
         }
     } else if (shift < 48) {
-        int64_t extval = sextract64(src << shift, 0, 48);
+        extval = sextract64(src << shift, 0, 48);
         if (!sat || src == (extval >> shift)) {
             return extval;
         }
@@ -2486,7 +2486,7 @@ static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
             return extval;
         }
     } else if (shift < 48) {
-        uint64_t extval = extract64(src << shift, 0, 48);
+        extval = extract64(src << shift, 0, 48);
         if (!sat || src == (extval >> shift)) {
             return extval;
         }
@@ -506,7 +506,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
 
         gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
         /* fpInactive case: reads as FPDSCR_NS */
-        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
+        tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
         storefn(s, opaque, tmp, true);
         lab_end = gen_new_label();
         tcg_gen_br(lab_end);
@@ -824,7 +824,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
         reg = get_areg(s, reg0);
         result = gen_ldst(s, opsize, reg, val, what, index);
         if (what == EA_STORE || !addrp) {
-            TCGv tmp = tcg_temp_new();
+            tmp = tcg_temp_new();
             if (reg0 == 7 && opsize == OS_BYTE &&
                 m68k_feature(s->env, M68K_FEATURE_M68K)) {
                 tcg_gen_addi_i32(tmp, reg, 2);
@@ -7432,15 +7432,15 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
 
 #define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
     do { \
-        float_status *status = &env->active_tc.msa_fp_status; \
+        float_status *status_ = &env->active_tc.msa_fp_status; \
         int c; \
         \
-        set_float_exception_flags(0, status); \
-        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+        set_float_exception_flags(0, status_); \
+        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status_); \
         c = update_msacsr(env, 0, 0); \
         \
         if (get_enabled_exceptions(env, c)) { \
-            DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+            DEST = ((FLOAT_SNAN ## BITS(status_) >> 6) << 6) | c; \
         } \
     } while (0)
 
@@ -4407,8 +4407,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
         case NM_BPOSGE32C:
             check_dsp_r3(ctx);
             {
-                int32_t imm = extract32(ctx->opcode, 1, 13) |
-                              extract32(ctx->opcode, 0, 1) << 13;
+                imm = extract32(ctx->opcode, 1, 13)
+                      | extract32(ctx->opcode, 0, 1) << 13;
 
                 gen_compute_branch_nm(ctx, OPC_BPOSGE32, 4, -1, -2,
                                       imm << 1);
@@ -4635,7 +4635,7 @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx)
         break;
     case NM_LI16:
         {
-            int imm = extract32(ctx->opcode, 0, 7);
+            imm = extract32(ctx->opcode, 0, 7);
             imm = (imm == 0x7f ? -1 : imm);
             if (rt != 0) {
                 tcg_gen_movi_tl(cpu_gpr[rt], imm);
@@ -15563,10 +15563,8 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
 
 void mips_tcg_init(void)
 {
-    int i;
-
     cpu_gpr[0] = NULL;
-    for (i = 1; i < 32; i++)
+    for (unsigned i = 1; i < 32; i++)
         cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUMIPSState,
                                                  active_tc.gpr[i]),
@@ -15583,7 +15581,7 @@ void mips_tcg_init(void)
                                           rname);
     }
 #endif /* !TARGET_MIPS64 */
-    for (i = 0; i < 32; i++) {
+    for (unsigned i = 0; i < 32; i++) {
         int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
 
         fpu_f64[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]);
@@ -15591,7 +15589,7 @@ void mips_tcg_init(void)
     msa_translate_init();
     cpu_PC = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUMIPSState, active_tc.PC), "PC");
-    for (i = 0; i < MIPS_DSP_ACC; i++) {
+    for (unsigned i = 0; i < MIPS_DSP_ACC; i++) {
         cpu_HI[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUMIPSState, active_tc.HI[i]),
                                        regnames_HI[i]);
@@ -704,7 +704,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
             CSR_MPMMASK,
         };
 
-        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
+        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
             int csrno = dump_csrs[i];
             target_ulong val = 0;
             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
@@ -747,7 +747,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
             CSR_VTYPE,
             CSR_VLENB,
         };
-        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
+        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
             int csrno = dump_rvv_csrs[i];
             target_ulong val = 0;
             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
@@ -516,7 +516,7 @@ ProbeSuccess:
                 k++;
                 continue;
             }
-            target_ulong addr = base + ((i * nf + k) << log2_esz);
+            addr = base + ((i * nf + k) << log2_esz);
             ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
             k++;
         }
@@ -4791,9 +4791,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
     uint32_t vta = vext_vta(desc); \
     uint32_t vma = vext_vma(desc); \
-    target_ulong i_max, i; \
+    target_ulong i_max, i_min, i; \
     \
-    i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
+    i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
+    i_max = MAX(i_min, env->vstart); \
     for (i = env->vstart; i < i_max; ++i) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -4962,8 +4962,6 @@ static void decode_rc_logical_shift(DisasContext *ctx)
     const9 = MASK_OP_RC_CONST9(ctx->opcode);
     op2 = MASK_OP_RC_OP2(ctx->opcode);
 
-    temp = tcg_temp_new();
-
     switch (op2) {
     case OPC2_32_RC_AND:
         tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
@@ -4972,10 +4970,12 @@ static void decode_rc_logical_shift(DisasContext *ctx)
         tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
         break;
     case OPC2_32_RC_NAND:
+        temp = tcg_temp_new();
         tcg_gen_movi_tl(temp, const9);
         tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
         break;
     case OPC2_32_RC_NOR:
+        temp = tcg_temp_new();
         tcg_gen_movi_tl(temp, const9);
         tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
         break;
@@ -5013,7 +5013,7 @@ static void decode_rc_logical_shift(DisasContext *ctx)
         break;
     case OPC2_32_RC_SHUFFLE:
         if (has_feature(ctx, TRICORE_FEATURE_162)) {
-            TCGv temp = tcg_constant_i32(const9);
+            temp = tcg_constant_i32(const9);
             gen_helper_shuffle(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
         } else {
             generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
tcg/tcg.c | 16

@@ -2549,21 +2549,21 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
                 {
                     const char *s_al, *s_op, *s_at;
                     MemOpIdx oi = op->args[k++];
-                    MemOp op = get_memop(oi);
+                    MemOp mop = get_memop(oi);
                     unsigned ix = get_mmuidx(oi);
 
-                    s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
-                    s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
-                    s_at = atom_name[(op & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
-                    op &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
+                    s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
+                    s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
+                    s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
+                    mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
 
                     /* If all fields are accounted for, print symbolically. */
-                    if (!op && s_al && s_op && s_at) {
+                    if (!mop && s_al && s_op && s_at) {
                         col += ne_fprintf(f, ",%s%s%s,%u",
                                           s_at, s_al, s_op, ix);
                     } else {
-                        op = get_memop(oi);
-                        col += ne_fprintf(f, ",$0x%x,%u", op, ix);
+                        mop = get_memop(oi);
+                        col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
                     }
                     i = 1;
                 }
@@ -625,7 +625,7 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
     throttle_config_init(&cfg);
 
     for (i = 0; i < 3; i++) {
-        BucketType index = to_test[is_ops][i];
+        index = to_test[is_ops][i];
         cfg.buckets[index].avg = avg;
     }
 
ui/gtk.c | 14

@@ -930,8 +930,8 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
         GdkMonitor *monitor = gdk_display_get_monitor_at_window(dpy, win);
         GdkRectangle geometry;
 
-        int x = (int)motion->x_root;
-        int y = (int)motion->y_root;
+        int xr = (int)motion->x_root;
+        int yr = (int)motion->y_root;
 
         gdk_monitor_get_geometry(monitor, &geometry);
 
@@ -942,13 +942,13 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
          * may still be only half way across the screen. Without
          * this warp, the server pointer would thus appear to hit
          * an invisible wall */
-        if (x <= geometry.x || x - geometry.x >= geometry.width - 1 ||
-            y <= geometry.y || y - geometry.y >= geometry.height - 1) {
+        if (xr <= geometry.x || xr - geometry.x >= geometry.width - 1 ||
+            yr <= geometry.y || yr - geometry.y >= geometry.height - 1) {
             GdkDevice *dev = gdk_event_get_device((GdkEvent *)motion);
-            x = geometry.x + geometry.width / 2;
-            y = geometry.y + geometry.height / 2;
+            xr = geometry.x + geometry.width / 2;
+            yr = geometry.y + geometry.height / 2;
 
-            gdk_device_warp(dev, screen, x, y);
+            gdk_device_warp(dev, screen, xr, yr);
             s->last_set = FALSE;
             return FALSE;
         }
@@ -1081,15 +1081,16 @@ static void qemu_spice_gl_update(DisplayChangeListener *dcl,
     }
 
     if (render_cursor) {
-        int x, y;
+        int ptr_x, ptr_y;
 
         qemu_mutex_lock(&ssd->lock);
-        x = ssd->ptr_x;
-        y = ssd->ptr_y;
+        ptr_x = ssd->ptr_x;
+        ptr_y = ssd->ptr_y;
         qemu_mutex_unlock(&ssd->lock);
         egl_texture_blit(ssd->gls, &ssd->blit_fb, &ssd->guest_fb,
                          !y_0_top);
         egl_texture_blend(ssd->gls, &ssd->blit_fb, &ssd->cursor_fb,
-                          !y_0_top, x, y, 1.0, 1.0);
+                          !y_0_top, ptr_x, ptr_y, 1.0, 1.0);
         glFlush();
     }
 
@@ -153,11 +153,12 @@ static void ZRLE_ENCODE_TILE(VncState *vs, ZRLE_PIXEL *data, int w, int h,
     }
 
     if (use_rle) {
-        ZRLE_PIXEL *ptr = data;
-        ZRLE_PIXEL *end = ptr + w * h;
         ZRLE_PIXEL *run_start;
         ZRLE_PIXEL pix;
 
+        ptr = data;
+        end = ptr + w * h;
+
         while (ptr < end) {
             int len;
             int index = 0;
@@ -198,7 +199,7 @@ static void ZRLE_ENCODE_TILE(VncState *vs, ZRLE_PIXEL *data, int w, int h,
         }
     } else if (use_palette) { /* no RLE */
         int bppp;
-        ZRLE_PIXEL *ptr = data;
+        ptr = data;
 
         /* packed pixels */
 
@@ -241,8 +242,6 @@ static void ZRLE_ENCODE_TILE(VncState *vs, ZRLE_PIXEL *data, int w, int h,
 #endif
     {
 #ifdef ZRLE_COMPACT_PIXEL
-        ZRLE_PIXEL *ptr;
-
         for (ptr = data; ptr < data + w * h; ptr++) {
             ZRLE_WRITE_PIXEL(vs, *ptr);
         }
@@ -86,8 +86,6 @@ int palette_put(VncPalette *palette, uint32_t color)
         return 0;
     }
     if (!entry) {
-        VncPaletteEntry *entry;
-
         entry = &palette->pool[palette->size];
         entry->color = color;
         entry->idx = idx;
ui/vnc.c | 12

@@ -1584,15 +1584,15 @@ static void vnc_jobs_bh(void *opaque)
  */
 static int vnc_client_read(VncState *vs)
 {
-    size_t ret;
+    size_t sz;
 
 #ifdef CONFIG_VNC_SASL
     if (vs->sasl.conn && vs->sasl.runSSF)
-        ret = vnc_client_read_sasl(vs);
+        sz = vnc_client_read_sasl(vs);
     else
 #endif /* CONFIG_VNC_SASL */
-        ret = vnc_client_read_plain(vs);
-    if (!ret) {
+        sz = vnc_client_read_plain(vs);
+    if (!sz) {
         if (vs->disconnecting) {
             vnc_disconnect_finish(vs);
             return -1;
@@ -3118,8 +3118,8 @@ static int vnc_refresh_server_surface(VncDisplay *vd)
         cmp_bytes = MIN(VNC_DIRTY_PIXELS_PER_BIT * VNC_SERVER_FB_BYTES,
                         server_stride);
         if (vd->guest.format != VNC_SERVER_FB_FORMAT) {
-            int width = pixman_image_get_width(vd->server);
-            tmpbuf = qemu_pixman_linebuf_create(VNC_SERVER_FB_FORMAT, width);
+            int w = pixman_image_get_width(vd->server);
+            tmpbuf = qemu_pixman_linebuf_create(VNC_SERVER_FB_FORMAT, w);
         } else {
             int guest_bpp =
                 PIXMAN_FORMAT_BPP(pixman_image_get_format(vd->guest.fb));
@@ -278,7 +278,7 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
     VuFdWatch *vu_fd_watch = find_vu_fd_watch(server, fd);
 
     if (!vu_fd_watch) {
-        VuFdWatch *vu_fd_watch = g_new0(VuFdWatch, 1);
+        vu_fd_watch = g_new0(VuFdWatch, 1);
 
         QTAILQ_INSERT_TAIL(&server->vu_fd_watches, vu_fd_watch, next);
 