mirror of https://github.com/xemu-project/xemu.git
accel/tcg: Remove cpu_neg()
Now that CPUNegativeOffsetState is part of CPUState, we can reference it directly.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
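The conversion is mechanical: every cpu_neg(cpu)-> access becomes a direct cpu->neg. field access, and the now-redundant inline helper (removed in the header hunk below) goes away. A minimal before/after sketch, with surrounding declarations elided:

    /* Before: callers went through the inline helper ... */
    static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
    {
        return &cpu->neg;
    }
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;

    /* After: CPUNegativeOffsetState is the embedded member cpu->neg,
     * so callers dereference it directly and the helper is deleted. */
    cpu->neg.icount_decr.u16.low = insns_left;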
parent 464dacf609
commit a953b5fa15
@@ -73,7 +73,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
         return;
     }
 
-    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
     sc->last_cpu_icount = cpu_icount;
 
@@ -124,7 +124,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
     sc->last_cpu_icount
-        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     if (sc->diff_clk < max_delay) {
         max_delay = sc->diff_clk;
     }
@@ -717,7 +717,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
         if (replay_has_exception()
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
             /* Execute just one insn to trigger exception pending in the log */
             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                   | CF_LAST_IO | CF_NOIRQ | 1;
@@ -807,7 +807,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
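For orientation when reading this hunk: the comment pairs the store with cpu_exit() on the requesting side. Roughly (a sketch of the two threads, not the literal source; qatomic_set_mb() is a store followed by a full memory barrier):

    /* vCPU thread (cpu_handle_interrupt): */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);  /* store + full barrier */
    if (unlikely(qatomic_read(&cpu->interrupt_request))) { /* ... */ }

    /* Requesting thread (cpu_exit): */
    qatomic_set(&cpu->exit_request, 1);
    smp_wmb();                          /* the smp_wmb the comment refers to */
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);

The barrier ensures that if the vCPU's store of 0 overwrites the -1, its subsequent loads observe the request that was published before the -1.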
@@ -898,7 +898,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(qatomic_read(&cpu->exit_request))
         || (icount_enabled()
             && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
@@ -923,7 +923,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }
 
     *last_tb = NULL;
-    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
+    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
     if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
@@ -942,7 +942,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     icount_update(cpu);
     /* Refill decrementer and continue execution. */
     insns_left = MIN(0xffff, cpu->icount_budget);
-    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+    cpu->neg.icount_decr.u16.low = insns_left;
     cpu->icount_extra = cpu->icount_budget - insns_left;
 
     /*
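The refill above splits the instruction budget: at most 0xffff instructions go into the 16-bit decrementer, and the remainder is parked in cpu->icount_extra. For reference, icount_decr overlays a 32-bit word roughly like this (paraphrased from QEMU's headers, not part of this diff):

    typedef union IcountDecr {
        uint32_t u32;         /* read whole to test for pending exit requests */
        struct {
    #if HOST_BIG_ENDIAN
            uint16_t high;    /* set to -1 to force an exit from the TB chain */
            uint16_t low;     /* instructions left before icount expires */
    #else
            uint16_t low;
            uint16_t high;
    #endif
        } u16;
    } IcountDecr;

For a budget of 100000 instructions, insns_left = MIN(0xffff, 100000) = 65535, so u16.low = 65535 and icount_extra = 34465.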
@@ -111,14 +111,14 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
      * each vCPU execution. However u16.high can be raised
      * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
      */
-    g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
+    g_assert(cpu->neg.icount_decr.u16.low == 0);
     g_assert(cpu->icount_extra == 0);
 
     replay_mutex_lock();
 
     cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
     insns_left = MIN(0xffff, cpu->icount_budget);
-    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+    cpu->neg.icount_decr.u16.low = insns_left;
     cpu->icount_extra = cpu->icount_budget - insns_left;
 
     if (cpu->icount_budget == 0) {
@@ -138,7 +138,7 @@ void icount_process_data(CPUState *cpu)
     icount_update(cpu);
 
     /* Reset the counters */
-    cpu_neg(cpu)->icount_decr.u16.low = 0;
+    cpu->neg.icount_decr.u16.low = 0;
     cpu->icount_extra = 0;
     cpu->icount_budget = 0;
 
@@ -91,7 +91,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
     }
 }
 
@@ -214,7 +214,7 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
          * Reset the cycle counter to the start of the block and
          * shift if to the number of actually executed instructions.
          */
-        cpu_neg(cpu)->icount_decr.u16.low += insns_left;
+        cpu->neg.icount_decr.u16.low += insns_left;
     }
 
     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
@@ -623,7 +623,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cc = CPU_GET_CLASS(cpu);
     if (cc->tcg_ops->io_recompile_replay_branch &&
         cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
-        cpu_neg(cpu)->icount_decr.u16.low++;
+        cpu->neg.icount_decr.u16.low++;
         n = 2;
     }
 
@@ -779,7 +779,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
     cpu->interrupt_request |= mask;
-    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
 
 #endif /* CONFIG_USER_ONLY */
@@ -471,17 +471,6 @@ static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
     return &env_cpu(env)->neg;
 }
 
-/**
- * cpu_neg(cpu)
- * @cpu: The generic CPUState
- *
- * Return the CPUNegativeOffsetState associated with the cpu.
- */
-static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
-{
-    return &cpu->neg;
-}
-
 /**
  * env_tlb(env)
  * @env: The architecture environment
@@ -71,7 +71,7 @@ G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
  */
 static inline bool cpu_loop_exit_requested(CPUState *cpu)
 {
-    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
 }
 
 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
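cpu_loop_exit_requested() folds both halves into one signed test: storing -1 (0xffff) into u16.high sets the sign bit of the combined word no matter what u16.low holds, so the (int32_t) cast reads negative. A worked case, assuming the union layout sketched earlier:

    u16.low  = 0x1234      /* instructions still budgeted */
    u16.high = 0xffff      /* exit requested */
    u32      = 0xffff1234  ->  (int32_t)0xffff1234 = -60876 < 0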
@@ -75,7 +75,7 @@ static void icount_enable_adaptive(void)
 static int64_t icount_get_executed(CPUState *cpu)
 {
     return (cpu->icount_budget -
-            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
+            (cpu->neg.icount_decr.u16.low + cpu->icount_extra));
 }
 
 /*
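The executed count falls out by subtraction: the not-yet-executed portion is the live decrementer (u16.low) plus the parked remainder (icount_extra), so the budget minus that sum is the number of instructions actually retired. Continuing the 100000-instruction example: immediately after the refill, 100000 - (65535 + 34465) = 0; once the vCPU has retired N instructions from the decrementer, u16.low = 65535 - N and the formula yields exactly N.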