accel/tcg: cache single instruction TB on pending replay exception
Again there is no reason to jump through the nocache hoops to execute a single instruction block. We do have to add an additional wrinkle to the cpu_handle_interrupt case to ensure we let through a TB where we have specifically disabled icount for the block.

As the last user of cpu_exec_nocache we can now remove the function. Further clean-up will follow in subsequent patches.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210213130325.14781-18-alex.bennee@linaro.org>
parent bc662a3351
commit a11bbb6a23
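For readers following the mechanics: rather than compiling and running a throw-away block via cpu_exec_nocache(), the patch below asks the main execution loop for a one-shot, single-instruction TB by setting cpu->cflags_next_tb. The low CF_COUNT_MASK bits of cflags carry the per-TB instruction budget, so the "| 1" requests exactly one instruction, and clearing CF_USE_ICOUNT keeps icount bookkeeping out of that block. The fragment below is an illustrative sketch of how the cpu_exec() loop of this QEMU generation consumes that request before the next TB lookup; it is context, not part of this patch, and details may differ between versions:

    /* Sketch (not from this patch): inside cpu_exec()'s main loop, just
     * before looking up the next TB. tb, last_tb and tb_exit are the
     * loop's locals. */
    uint32_t cflags = cpu->cflags_next_tb;
    if (cflags == -1) {
        /* No one-shot request pending: use the normal global cflags. */
        cflags = curr_cflags();
    } else {
        /* Consume the request so it applies to exactly one TB. */
        cpu->cflags_next_tb = -1;
    }
    tb = tb_find(cpu, last_tb, tb_exit, cflags);

Once the request is consumed, the single-instruction TB runs and later iterations fall back to normal cached translation, which is what lets the pending replay exception surface without the nocache machinery.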
accel/tcg/cpu-exec.c
@@ -224,40 +224,6 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     return last_tb;
 }
 
-#ifndef CONFIG_USER_ONLY
-/* Execute the code without caching the generated code. An interpreter
-   could be used if available. */
-static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
-                             TranslationBlock *orig_tb, bool ignore_icount)
-{
-    TranslationBlock *tb;
-    uint32_t cflags = curr_cflags() | CF_NOCACHE;
-    int tb_exit;
-
-    if (ignore_icount) {
-        cflags &= ~CF_USE_ICOUNT;
-    }
-
-    /* Should never happen.
-       We only end up here when an existing TB is too long. */
-    cflags |= MIN(max_cycles, CF_COUNT_MASK);
-
-    mmap_lock();
-    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
-                     orig_tb->flags, cflags);
-    tb->orig_tb = orig_tb;
-    mmap_unlock();
-
-    /* execute the generated code */
-    trace_exec_tb_nocache(tb, tb->pc);
-    cpu_tb_exec(cpu, tb, &tb_exit);
-
-    mmap_lock();
-    tb_phys_invalidate(tb, -1);
-    mmap_unlock();
-    tcg_tb_remove(tb);
-}
-#endif
 
 static void cpu_exec_enter(CPUState *cpu)
 {
@@ -524,15 +490,12 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #ifndef CONFIG_USER_ONLY
         if (replay_has_exception()
             && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
-            /* try to cause an exception pending in the log */
-            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
+            /* Execute just one insn to trigger exception pending in the log */
+            cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
         }
 #endif
-        if (cpu->exception_index < 0) {
-            return false;
-        }
+        return false;
     }
 
     if (cpu->exception_index >= EXCP_INTERRUPT) {
         /* exit request from the cpu execution loop */
         *ret = cpu->exception_index;
@@ -688,6 +651,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     /* Finally, check if we need to exit to the main loop. */
     if (unlikely(qatomic_read(&cpu->exit_request))
         || (icount_enabled()
+            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
             && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {