mirror of https://github.com/xemu-project/xemu.git
kvm: Fix coding style violations
No functional changes.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent b9bec74bcb
commit a426e12217

1 changed file: kvm-all.c (139 lines changed)
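The diff below is purely mechanical: single-statement if/for bodies gain braces, tab indentation becomes four spaces, and over-long argument lists are re-wrapped, so behaviour is unchanged. As a minimal sketch of the brace rule being enforced (the helper below is a simplified, hypothetical stand-in for illustration, not code from the patch):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Illustrative only: a brace-less body such as
     *     if (ret == -1)
     *         ret = -errno;
     * is what QEMU's CODING_STYLE rejects; the patch rewrites such sites
     * into the braced form shown here (compare the kvm_ioctl() hunk below). */
    static int checked_ioctl(int fd, unsigned long request, void *arg)
    {
        int ret = ioctl(fd, request, arg);

        if (ret == -1) {
            ret = -errno;
        }
        return ret;
    }

The same rule is applied to every conditional and loop the patch touches in kvm-all.c, which is why the diff is large even though it is a no-op at runtime.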
diff --git a/kvm-all.c b/kvm-all.c
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -88,10 +88,12 @@ static KVMSlot *kvm_alloc_slot(KVMState *s)
 
     for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
         /* KVM private memory slots */
-        if (i >= 8 && i < 12)
+        if (i >= 8 && i < 12) {
             continue;
-        if (s->slots[i].memory_size == 0)
+        }
+        if (s->slots[i].memory_size == 0) {
             return &s->slots[i];
+        }
     }
 
     fprintf(stderr, "%s: no free slot available\n", __func__);
@@ -226,9 +228,10 @@ int kvm_init_vcpu(CPUState *env)
     }
 
 #ifdef KVM_CAP_COALESCED_MMIO
-    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
-        s->coalesced_mmio_ring = (void *) env->kvm_run +
-            s->coalesced_mmio * PAGE_SIZE;
+    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
+        s->coalesced_mmio_ring =
+            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
+    }
 #endif
 
     ret = kvm_arch_init_vcpu(env);
@@ -275,16 +278,14 @@ static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
 
 int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
 {
-    return kvm_dirty_pages_log_change(phys_addr, size,
-                                      KVM_MEM_LOG_DIRTY_PAGES,
-                                      KVM_MEM_LOG_DIRTY_PAGES);
+    return kvm_dirty_pages_log_change(phys_addr, size, KVM_MEM_LOG_DIRTY_PAGES,
+                                      KVM_MEM_LOG_DIRTY_PAGES);
 }
 
 int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
 {
-    return kvm_dirty_pages_log_change(phys_addr, size,
-                                      0,
-                                      KVM_MEM_LOG_DIRTY_PAGES);
+    return kvm_dirty_pages_log_change(phys_addr, size, 0,
+                                      KVM_MEM_LOG_DIRTY_PAGES);
 }
 
 static int kvm_set_migration_log(int enable)
@@ -356,7 +357,7 @@ static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
  * @end_addr: end of logged region.
  */
 static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-					  target_phys_addr_t end_addr)
+                                          target_phys_addr_t end_addr)
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
@@ -480,9 +481,8 @@ static int kvm_check_many_ioeventfds(void)
 #endif
 }
 
-static void kvm_set_phys_mem(target_phys_addr_t start_addr,
-                             ram_addr_t size,
-                             ram_addr_t phys_offset)
+static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
+                             ram_addr_t phys_offset)
 {
     KVMState *s = kvm_state;
     ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
@@ -589,13 +589,13 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr,
     }
 
     /* in case the KVM bug workaround already "consumed" the new slot */
-    if (!size)
+    if (!size) {
         return;
-
+    }
     /* KVM does not need to know about this memory */
-    if (flags >= IO_MEM_UNASSIGNED)
+    if (flags >= IO_MEM_UNASSIGNED) {
         return;
-
+    }
     mem = kvm_alloc_slot(s);
     mem->memory_size = size;
     mem->start_addr = start_addr;
@@ -611,30 +611,29 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr,
 }
 
 static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
-				  target_phys_addr_t start_addr,
-				  ram_addr_t size,
-				  ram_addr_t phys_offset)
+                                  target_phys_addr_t start_addr,
+                                  ram_addr_t size, ram_addr_t phys_offset)
 {
-	kvm_set_phys_mem(start_addr, size, phys_offset);
+    kvm_set_phys_mem(start_addr, size, phys_offset);
 }
 
 static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
-					target_phys_addr_t start_addr,
-					target_phys_addr_t end_addr)
+                                        target_phys_addr_t start_addr,
+                                        target_phys_addr_t end_addr)
 {
-	return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
 }
 
 static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
-				    int enable)
+                                    int enable)
 {
-	return kvm_set_migration_log(enable);
+    return kvm_set_migration_log(enable);
 }
 
 static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
-	.set_memory = kvm_client_set_memory,
-	.sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
-	.migration_log = kvm_client_migration_log,
+    .set_memory = kvm_client_set_memory,
+    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
+    .migration_log = kvm_client_migration_log,
 };
 
 int kvm_init(int smp_cpus)
@@ -651,9 +650,9 @@ int kvm_init(int smp_cpus)
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_INIT(&s->kvm_sw_breakpoints);
 #endif
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
+    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
         s->slots[i].slot = i;
-
+    }
     s->vmfd = -1;
     s->fd = qemu_open("/dev/kvm", O_RDWR);
     if (s->fd == -1) {
@@ -664,8 +663,9 @@ int kvm_init(int smp_cpus)
 
     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
     if (ret < KVM_API_VERSION) {
-        if (ret > 0)
+        if (ret > 0) {
             ret = -EINVAL;
+        }
         fprintf(stderr, "kvm version too old\n");
         goto err;
     }
@@ -750,8 +750,9 @@ int kvm_init(int smp_cpus)
 #endif
 
     ret = kvm_arch_init(s, smp_cpus);
-    if (ret < 0)
+    if (ret < 0) {
         goto err;
+    }
 
     kvm_state = s;
     cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);
@@ -762,10 +763,12 @@ int kvm_init(int smp_cpus)
 
 err:
     if (s) {
-        if (s->vmfd != -1)
+        if (s->vmfd != -1) {
             close(s->vmfd);
-        if (s->fd != -1)
+        }
+        if (s->fd != -1) {
             close(s->fd);
+        }
     }
     qemu_free(s);
 
@@ -829,8 +832,9 @@ static void kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
     cpu_dump_state(env, stderr, fprintf, 0);
     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
         fprintf(stderr, "emulation failure\n");
-        if (!kvm_arch_stop_on_emulation_error(env))
-		return;
+        if (!kvm_arch_stop_on_emulation_error(env)) {
+            return;
+        }
     }
     /* FIXME: Should trigger a qmp message to let management know
      * something went wrong.
@@ -870,8 +874,9 @@ static void do_kvm_cpu_synchronize_state(void *_env)
 
 void kvm_cpu_synchronize_state(CPUState *env)
 {
-    if (!env->kvm_vcpu_dirty)
+    if (!env->kvm_vcpu_dirty) {
         run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
+    }
 }
 
 void kvm_cpu_synchronize_post_reset(CPUState *env)
@@ -1011,9 +1016,9 @@ int kvm_ioctl(KVMState *s, int type, ...)
     va_end(ap);
 
     ret = ioctl(s->fd, type, arg);
-    if (ret == -1)
+    if (ret == -1) {
         ret = -errno;
-
+    }
     return ret;
 }
 
@@ -1028,9 +1033,9 @@ int kvm_vm_ioctl(KVMState *s, int type, ...)
     va_end(ap);
 
     ret = ioctl(s->vmfd, type, arg);
-    if (ret == -1)
+    if (ret == -1) {
         ret = -errno;
-
+    }
     return ret;
 }
 
@@ -1045,9 +1050,9 @@ int kvm_vcpu_ioctl(CPUState *env, int type, ...)
     va_end(ap);
 
     ret = ioctl(env->kvm_fd, type, arg);
-    if (ret == -1)
+    if (ret == -1) {
         ret = -errno;
-
+    }
     return ret;
 }
 
@@ -1116,8 +1121,9 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
     struct kvm_sw_breakpoint *bp;
 
     QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
-        if (bp->pc == pc)
+        if (bp->pc == pc) {
             return bp;
+        }
     }
     return NULL;
 }
@@ -1172,8 +1178,9 @@ int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
     }
 
     bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
-    if (!bp)
+    if (!bp) {
         return -ENOMEM;
+    }
 
     bp->pc = addr;
     bp->use_count = 1;
@@ -1187,14 +1194,16 @@ int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                           bp, entry);
     } else {
         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
-        if (err)
+        if (err) {
             return err;
+        }
     }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
         err = kvm_update_guest_debug(env, 0);
-        if (err)
+        if (err) {
             return err;
+        }
     }
     return 0;
 }
@@ -1208,8 +1217,9 @@ int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
 
     if (type == GDB_BREAKPOINT_SW) {
         bp = kvm_find_sw_breakpoint(current_env, addr);
-        if (!bp)
+        if (!bp) {
             return -ENOENT;
+        }
 
         if (bp->use_count > 1) {
             bp->use_count--;
@@ -1217,21 +1227,24 @@ int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
         }
 
         err = kvm_arch_remove_sw_breakpoint(current_env, bp);
-        if (err)
+        if (err) {
             return err;
+        }
 
         QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
         qemu_free(bp);
     } else {
         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
-        if (err)
+        if (err) {
             return err;
+        }
     }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
         err = kvm_update_guest_debug(env, 0);
-        if (err)
+        if (err) {
             return err;
+        }
     }
     return 0;
 }
@@ -1246,15 +1259,17 @@ void kvm_remove_all_breakpoints(CPUState *current_env)
         if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
             for (env = first_cpu; env != NULL; env = env->next_cpu) {
-                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
+                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                     break;
+                }
             }
         }
     }
    kvm_arch_remove_all_hw_breakpoints();
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu)
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
         kvm_update_guest_debug(env, 0);
+    }
 }
 
 #else /* !KVM_CAP_SET_GUEST_DEBUG */
@@ -1286,8 +1301,9 @@ int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
     struct kvm_signal_mask *sigmask;
    int r;
 
-    if (!sigset)
+    if (!sigset) {
         return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
+    }
 
     sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));
 
@@ -1342,13 +1358,16 @@ int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
         .fd = fd,
     };
     int r;
-    if (!kvm_enabled())
+    if (!kvm_enabled()) {
         return -ENOSYS;
-    if (!assign)
+    }
+    if (!assign) {
         kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+    }
     r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
-    if (r < 0)
+    if (r < 0) {
         return r;
+    }
     return 0;
 #else
     return -ENOSYS;