cpu-exec: lookup/generate TB outside exclusive region during step_atomic

Now that all code generation has been converted to check CF_PARALLEL, we can
generate !CF_PARALLEL code without having yet set !parallel_cpus --
and therefore without having to be in the exclusive region during
cpu_exec_step_atomic.

While at it, merge cpu_exec_step into cpu_exec_step_atomic.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Emilio G. Cota, 2017-07-14 17:56:30 -04:00 (committed by Richard Henderson)
parent e82d5a2460
commit ac03ee5331
1 changed file with 14 additions and 16 deletions
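
The key premise of the commit message is that translation now keys off the per-TB CF_PARALLEL compile flag instead of the global parallel_cpus, so a serial (!CF_PARALLEL) TB can be generated while other vCPUs are still running. A minimal, self-contained C sketch of that distinction; this is not QEMU source, and everything except the names CF_PARALLEL and parallel_cpus is illustrative:

/* Old scheme: the code generator consults a global, so emitting serial
 * code requires parallel_cpus == false, i.e. the exclusive region.
 * New scheme: the generator consults only the TB's own cflags. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CF_PARALLEL (1u << 15)    /* per-TB compile flag; bit position illustrative */

static bool parallel_cpus = true; /* global; only safe to flip inside the exclusive region */

static void generate_code_old(void)
{
    /* Decision tied to the global at translation time. */
    printf("emit %s code\n", parallel_cpus ? "parallel" : "serial");
}

static void generate_code_new(uint32_t tb_cflags)
{
    /* Decision tied to the TB itself, valid at any time. */
    printf("emit %s code\n", (tb_cflags & CF_PARALLEL) ? "parallel" : "serial");
}

int main(void)
{
    generate_code_old();            /* "parallel": follows the global */
    generate_code_new(0);           /* "serial", even while parallel_cpus is true */
    generate_code_new(CF_PARALLEL); /* "parallel" */
    return 0;
}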

@@ -223,30 +223,40 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
 }
 #endif
 
-static void cpu_exec_step(CPUState *cpu)
+void cpu_exec_step_atomic(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
     uint32_t cflags = 1 | CF_IGNORE_ICOUNT;
+    uint32_t cf_mask = cflags & CF_HASH_MASK;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
-        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags,
-                                  cflags & CF_HASH_MASK);
+        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
         if (tb == NULL) {
             mmap_lock();
             tb_lock();
-            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
+            if (likely(tb == NULL)) {
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            }
             tb_unlock();
             mmap_unlock();
         }
 
+        start_exclusive();
+
+        /* Since we got here, we know that parallel_cpus must be true.  */
+        parallel_cpus = false;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
         cpu_tb_exec(cpu, tb);
         cc->cpu_exec_exit(cpu);
+        parallel_cpus = true;
+        end_exclusive();
     } else {
         /* We may have exited due to another problem here, so we need
          * to reset any tb_locks we may have taken but didn't release.
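
The lookup sequence in the hunk above is a check/lock/re-check pattern: try the lock-free tb_lookup__cpu_state first, and only on a miss take mmap_lock/tb_lock, look again via tb_htable_lookup (another thread may have generated the TB in the meantime), and call tb_gen_code only if it is still absent. A generic sketch of the pattern, with illustrative names throughout:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t gen_lock = PTHREAD_MUTEX_INITIALIZER;

/* find_or_create, lookup and create are illustrative stand-ins. */
void *find_or_create(const void *key,
                     void *(*lookup)(const void *),
                     void *(*create)(const void *))
{
    void *obj = lookup(key);       /* fast path: no lock taken */
    if (obj == NULL) {
        pthread_mutex_lock(&gen_lock);
        obj = lookup(key);         /* re-check: we may have lost the race */
        if (obj == NULL) {
            obj = create(key);     /* still missing: generate under the lock */
        }
        pthread_mutex_unlock(&gen_lock);
    }
    return obj;
}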
@@ -260,18 +270,6 @@ static void cpu_exec_step(CPUState *cpu)
     }
 }
 
-
-void cpu_exec_step_atomic(CPUState *cpu)
-{
-    start_exclusive();
-
-    /* Since we got here, we know that parallel_cpus must be true.  */
-    parallel_cpus = false;
-    cpu_exec_step(cpu);
-    parallel_cpus = true;
-    end_exclusive();
-}
 
 struct tb_desc {
     target_ulong pc;
     target_ulong cs_base;
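
The net shape of the merged function: the potentially slow lookup/translation happens under mmap_lock/tb_lock only, and the exclusive region, which halts every other vCPU, now brackets just the execution. A skeleton under that reading; the two helpers below are hypothetical stand-ins, not the QEMU API:

#include <stdbool.h>

typedef struct CPUState CPUState;
typedef struct TranslationBlock TranslationBlock;

extern bool parallel_cpus;
extern void start_exclusive(void);
extern void end_exclusive(void);
/* Hypothetical stand-ins for the lookup/generation and execution paths: */
extern TranslationBlock *lookup_or_generate_tb(CPUState *cpu);
extern void execute_tb(CPUState *cpu, TranslationBlock *tb);

void step_atomic_outline(CPUState *cpu)
{
    /* May take mmap_lock/tb_lock and run the translator, but no longer
     * stalls the other vCPUs while doing so. */
    TranslationBlock *tb = lookup_or_generate_tb(cpu);

    start_exclusive();         /* every other vCPU is now quiescent */
    parallel_cpus = false;     /* safe: we are the only runner */
    execute_tb(cpu, tb);       /* run the serial (!CF_PARALLEL) code */
    parallel_cpus = true;
    end_exclusive();
}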