queued 2.7 patches

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJXNRr0AAoJEK0ScMxN0CebXMkH/06UiAzfAX4V1NhbeM6fT8q9
 V5YjH5wY091LEwMBpBICMnLn/U+eN8PaHAzFkK51CgdDi0PnZkMzPf8Qmafiahxe
 3WVkjo/Iv/6gpyoJRKXCyecoJnV8/2RasqB9BS38K08UTovfRln6BIKeJgwNibsO
 EpQCBxjvc6W8GYiWlwvz0eSXX9wpbQhwOUXr+zRn164ySExNWBo6i7EYp1zjpiCB
 kuoZvq5Q4YsMX3rDzYvpeSbM7jDPOkLrzPTQ2yOcnaAc+Y4vng/uoXWlw4WV7xWR
 +NscXPjrq6gyu+FKw4wgjW9lGptG/+n4F2SJEgoucD9e2MpD3yS/uFrr2e881R4=
 =1PK7
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20160512' into staging

queued 2.7 patches

# gpg: Signature made Fri 13 May 2016 01:08:20 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-tcg-20160512: (39 commits)
  cpu-exec: Clean up 'interrupt_request' reloading in cpu_handle_interrupt()
  cpu-exec: Remove unused 'x86_cpu' and 'env' from cpu_exec()
  cpu-exec: Move TB execution stuff out of cpu_exec()
  cpu-exec: Move interrupt handling out of cpu_exec()
  cpu-exec: Move exception handling out of cpu_exec()
  cpu-exec: Move halt handling out of cpu_exec()
  cpu-exec: Remove relic orphaned comment
  tcg: Remove needless CPUState::current_tb
  cpu-exec: Move TB chaining into tb_find_fast()
  tcg: Rework tb_invalidated_flag
  tcg: Clean up from 'next_tb'
  cpu-exec: elide more icount code if CONFIG_USER_ONLY
  tcg: reorganize tb_find_physical loop
  tcg: code_bitmap and code_write_count are not used by user-mode emulation
  tcg: Allow goto_tb to any target PC in user mode
  tcg: Clean up direct block chaining safety checks
  tcg: Clean up tb_jmp_unlink()
  tcg: Extract removing of jumps to TB from tb_phys_invalidate()
  tcg: Rename tb_jmp_remove() to tb_remove_from_jmp_list()
  tcg: Clarify thread safety check in tb_add_jump()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2016-05-13 10:42:40 +01:00
commit 20c20318f9
59 changed files with 1153 additions and 892 deletions


@ -68,7 +68,6 @@ void cpu_reloading_memory_map(void)
void cpu_loop_exit(CPUState *cpu)
{
cpu->current_tb = NULL;
siglongjmp(cpu->jmp_env, 1);
}
@ -77,6 +76,5 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
if (pc) {
cpu_restore_state(cpu, pc);
}
cpu->current_tb = NULL;
siglongjmp(cpu->jmp_env, 1);
}


@ -136,7 +136,9 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
CPUArchState *env = cpu->env_ptr;
uintptr_t next_tb;
uintptr_t ret;
TranslationBlock *last_tb;
int tb_exit;
uint8_t *tb_ptr = itb->tc_ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
@ -160,118 +162,125 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
#endif /* DEBUG_DISAS */
cpu->can_do_io = !use_icount;
next_tb = tcg_qemu_tb_exec(env, tb_ptr);
ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
next_tb & TB_EXIT_MASK);
last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
tb_exit = ret & TB_EXIT_MASK;
trace_exec_tb_exit(last_tb, tb_exit);
if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
if (tb_exit > TB_EXIT_IDX1) {
/* We didn't start executing this TB (eg because the instruction
* counter hit zero); we must restore the guest PC to the address
* of the start of the TB.
*/
CPUClass *cc = CPU_GET_CLASS(cpu);
TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
"Stopped execution of TB chain before %p ["
TARGET_FMT_lx "] %s\n",
itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
last_tb->tc_ptr, last_tb->pc,
lookup_symbol(last_tb->pc));
if (cc->synchronize_from_tb) {
cc->synchronize_from_tb(cpu, tb);
cc->synchronize_from_tb(cpu, last_tb);
} else {
assert(cc->set_pc);
cc->set_pc(cpu, tb->pc);
cc->set_pc(cpu, last_tb->pc);
}
}
if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
if (tb_exit == TB_EXIT_REQUESTED) {
/* We were asked to stop executing TBs (probably a pending
* interrupt). We've now stopped, so clear the flag.
*/
cpu->tcg_exit_req = 0;
}
return next_tb;
return ret;
}
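/* Illustration only, not part of the patch: tcg_qemu_tb_exec() packs the
 * pointer to the last executed TB and the exit reason into a single
 * uintptr_t, using the two low bits covered by TB_EXIT_MASK.  A minimal
 * sketch of the decoding performed above:
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;
 *
 * where tb_exit is e.g. TB_EXIT_IDX1, TB_EXIT_REQUESTED or
 * TB_EXIT_ICOUNT_EXPIRED.
 */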
#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
TranslationBlock *orig_tb, bool ignore_icount)
{
TranslationBlock *tb;
bool old_tb_flushed;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
old_tb_flushed = cpu->tb_flushed;
cpu->tb_flushed = false;
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles | CF_NOCACHE
| (ignore_icount ? CF_IGNORE_ICOUNT : 0));
tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
cpu->current_tb = tb;
tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
cpu->tb_flushed |= old_tb_flushed;
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
#endif
static TranslationBlock *tb_find_physical(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
uint64_t flags)
uint32_t flags)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb, **ptb1;
TranslationBlock *tb, **tb_hash_head, **ptb1;
unsigned int h;
tb_page_addr_t phys_pc, phys_page1;
target_ulong virt_page2;
tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
/* find translated block using physical mappings */
phys_pc = get_page_addr_code(env, pc);
phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_phys_hash_func(phys_pc);
ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
for(;;) {
tb = *ptb1;
if (!tb) {
return NULL;
}
/* Start at head of the hash entry */
ptb1 = tb_hash_head = &tcg_ctx.tb_ctx.tb_phys_hash[h];
tb = *ptb1;
while (tb) {
if (tb->pc == pc &&
tb->page_addr[0] == phys_page1 &&
tb->cs_base == cs_base &&
tb->flags == flags) {
/* check next page if needed */
if (tb->page_addr[1] != -1) {
tb_page_addr_t phys_page2;
virt_page2 = (pc & TARGET_PAGE_MASK) +
TARGET_PAGE_SIZE;
phys_page2 = get_page_addr_code(env, virt_page2);
if (tb->page_addr[1] == -1) {
/* done, we have a match */
break;
} else {
/* check next page if needed */
target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
TARGET_PAGE_SIZE;
tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);
if (tb->page_addr[1] == phys_page2) {
break;
}
} else {
break;
}
}
ptb1 = &tb->phys_hash_next;
tb = *ptb1;
}
/* Move the TB to the head of the list */
*ptb1 = tb->phys_hash_next;
tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
if (tb) {
/* Move the TB to the head of the list */
*ptb1 = tb->phys_hash_next;
tb->phys_hash_next = *tb_hash_head;
*tb_hash_head = tb;
}
return tb;
}
static TranslationBlock *tb_find_slow(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
uint64_t flags)
uint32_t flags)
{
TranslationBlock *tb;
@ -309,26 +318,63 @@ found:
return tb;
}
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
static inline TranslationBlock *tb_find_fast(CPUState *cpu,
TranslationBlock **last_tb,
int tb_exit)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
int flags;
uint32_t flags;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb_lock();
tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(cpu, pc, cs_base, flags);
}
if (cpu->tb_flushed) {
/* Ensure that no TB jump will be modified as the
* translation buffer has been flushed.
*/
*last_tb = NULL;
cpu->tb_flushed = false;
}
/* See if we can patch the calling TB. */
if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
tb_add_jump(*last_tb, tb_exit, tb);
}
tb_unlock();
return tb;
}
static void cpu_handle_debug_exception(CPUState *cpu)
static inline bool cpu_handle_halt(CPUState *cpu)
{
if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
&& replay_interrupt()) {
X86CPU *x86_cpu = X86_CPU(cpu);
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
}
#endif
if (!cpu_has_work(cpu)) {
current_cpu = NULL;
return true;
}
cpu->halted = 0;
}
return false;
}
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
@ -342,37 +388,197 @@ static void cpu_handle_debug_exception(CPUState *cpu)
cc->debug_excp_handler(cpu);
}
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
if (cpu->exception_index >= 0) {
if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
*ret = cpu->exception_index;
if (*ret == EXCP_DEBUG) {
cpu_handle_debug_exception(cpu);
}
cpu->exception_index = -1;
return true;
} else {
#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->do_interrupt(cpu);
#endif
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
#else
if (replay_exception()) {
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->do_interrupt(cpu);
cpu->exception_index = -1;
} else if (!replay_has_interrupt()) {
/* give a chance to iothread in replay mode */
*ret = EXCP_INTERRUPT;
return true;
}
#endif
}
#ifndef CONFIG_USER_ONLY
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
TranslationBlock *last_tb = NULL; /* Avoid chaining TBs */
cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
*ret = -1;
return true;
#endif
}
return false;
}
static inline void cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int interrupt_request = cpu->interrupt_request;
if (unlikely(interrupt_request)) {
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(cpu);
}
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
replay_interrupt();
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
cpu_loop_exit(cpu);
}
#if defined(TARGET_I386)
else if (interrupt_request & CPU_INTERRUPT_INIT) {
X86CPU *x86_cpu = X86_CPU(cpu);
CPUArchState *env = &x86_cpu->env;
replay_interrupt();
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
cpu_loop_exit(cpu);
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
cpu_loop_exit(cpu);
}
#endif
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
replay_interrupt();
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
*last_tb = NULL;
}
/* The target hook may have updated the 'cpu->interrupt_request';
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
*last_tb = NULL;
}
}
if (unlikely(cpu->exit_request || replay_has_interrupt())) {
cpu->exit_request = 0;
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
}
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
TranslationBlock **last_tb, int *tb_exit,
SyncClocks *sc)
{
uintptr_t ret;
if (unlikely(cpu->exit_request)) {
return;
}
trace_exec_tb(tb, tb->pc);
ret = cpu_tb_exec(cpu, tb);
*last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
*tb_exit = ret & TB_EXIT_MASK;
switch (*tb_exit) {
case TB_EXIT_REQUESTED:
/* Something asked us to stop executing
* chained TBs; just continue round the main
* loop. Whatever requested the exit will also
* have set something else (eg exit_request or
* interrupt_request) which we will handle
* next time around the loop. But we need to
* ensure the tcg_exit_req read in generated code
* comes before the next read of cpu->exit_request
* or cpu->interrupt_request.
*/
smp_rmb();
*last_tb = NULL;
break;
case TB_EXIT_ICOUNT_EXPIRED:
{
/* Instruction counter expired. */
#ifdef CONFIG_USER_ONLY
abort();
#else
int insns_left = cpu->icount_decr.u32;
if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
cpu->icount_extra += insns_left;
insns_left = MIN(0xffff, cpu->icount_extra);
cpu->icount_extra -= insns_left;
cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
cpu_exec_nocache(cpu, insns_left, *last_tb, false);
align_clocks(sc, cpu);
}
cpu->exception_index = EXCP_INTERRUPT;
*last_tb = NULL;
cpu_loop_exit(cpu);
}
break;
#endif
}
default:
break;
}
}
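/* Worked example, not from the patch, of the icount refill above.  Assume
 * the decrementer expired with icount_decr.u32 == 0 and icount_extra ==
 * 100000:
 *
 *     insns_left = 0;
 *     cpu->icount_extra += insns_left;       still 100000
 *     insns_left = MIN(0xffff, 100000);      65535, u16.low is 16 bits wide
 *     cpu->icount_extra -= insns_left;       34465 kept for later refills
 *     cpu->icount_decr.u16.low = 65535;
 *
 * so execution continues with a freshly loaded 16-bit decrementer instead
 * of exiting to the main loop.
 */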
/* main execution loop */
int cpu_exec(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
X86CPU *x86_cpu = X86_CPU(cpu);
CPUArchState *env = &x86_cpu->env;
#endif
int ret, interrupt_request;
TranslationBlock *tb;
uintptr_t next_tb;
int ret;
SyncClocks sc;
/* replay_interrupt may need current_cpu */
current_cpu = cpu;
if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
&& replay_interrupt()) {
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
}
#endif
if (!cpu_has_work(cpu)) {
current_cpu = NULL;
return EXCP_HALTED;
}
cpu->halted = 0;
if (cpu_handle_halt(cpu)) {
return EXCP_HALTED;
}
atomic_mb_set(&tcg_current_cpu, cpu);
@ -391,185 +597,26 @@ int cpu_exec(CPUState *cpu)
*/
init_delay_params(&sc, cpu);
/* prepare setjmp context for exception handling */
for(;;) {
TranslationBlock *tb, *last_tb;
int tb_exit = 0;
/* prepare setjmp context for exception handling */
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
/* if an exception is pending, we execute it here */
if (cpu->exception_index >= 0) {
if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
ret = cpu->exception_index;
if (ret == EXCP_DEBUG) {
cpu_handle_debug_exception(cpu);
}
cpu->exception_index = -1;
break;
} else {
#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
#if defined(TARGET_I386)
cc->do_interrupt(cpu);
#endif
ret = cpu->exception_index;
cpu->exception_index = -1;
break;
#else
if (replay_exception()) {
cc->do_interrupt(cpu);
cpu->exception_index = -1;
} else if (!replay_has_interrupt()) {
/* give a chance to iothread in replay mode */
ret = EXCP_INTERRUPT;
break;
}
#endif
}
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
ret = -1;
if (cpu_handle_exception(cpu, &ret)) {
break;
}
next_tb = 0; /* force lookup of first TB */
last_tb = NULL; /* forget the last executed TB after exception */
cpu->tb_flushed = false; /* reset before first TB lookup */
for(;;) {
interrupt_request = cpu->interrupt_request;
if (unlikely(interrupt_request)) {
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(cpu);
}
if (replay_mode == REPLAY_MODE_PLAY
&& !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
replay_interrupt();
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
cpu_loop_exit(cpu);
}
#if defined(TARGET_I386)
else if (interrupt_request & CPU_INTERRUPT_INIT) {
replay_interrupt();
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
cpu_loop_exit(cpu);
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
cpu_loop_exit(cpu);
}
#endif
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
replay_interrupt();
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
next_tb = 0;
}
}
/* Don't use the cached interrupt_request value,
do_interrupt may have updated the EXITTB flag. */
if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
}
}
if (unlikely(cpu->exit_request
|| replay_has_interrupt())) {
cpu->exit_request = 0;
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
tb_lock();
tb = tb_find_fast(cpu);
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
next_tb = 0;
tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
}
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
if (next_tb != 0 && tb->page_addr[1] == -1
&& !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
next_tb & TB_EXIT_MASK, tb);
}
tb_unlock();
if (likely(!cpu->exit_request)) {
trace_exec_tb(tb, tb->pc);
/* execute the generated code */
cpu->current_tb = tb;
next_tb = cpu_tb_exec(cpu, tb);
cpu->current_tb = NULL;
switch (next_tb & TB_EXIT_MASK) {
case TB_EXIT_REQUESTED:
/* Something asked us to stop executing
* chained TBs; just continue round the main
* loop. Whatever requested the exit will also
* have set something else (eg exit_request or
* interrupt_request) which we will handle
* next time around the loop. But we need to
* ensure the tcg_exit_req read in generated code
* comes before the next read of cpu->exit_request
* or cpu->interrupt_request.
*/
smp_rmb();
next_tb = 0;
break;
case TB_EXIT_ICOUNT_EXPIRED:
{
/* Instruction counter expired. */
int insns_left = cpu->icount_decr.u32;
if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
cpu->icount_extra += insns_left;
insns_left = MIN(0xffff, cpu->icount_extra);
cpu->icount_extra -= insns_left;
cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
cpu_exec_nocache(cpu, insns_left, tb, false);
align_clocks(&sc, cpu);
}
cpu->exception_index = EXCP_INTERRUPT;
next_tb = 0;
cpu_loop_exit(cpu);
}
break;
}
default:
break;
}
}
cpu_handle_interrupt(cpu, &last_tb);
tb = tb_find_fast(cpu, &last_tb, tb_exit);
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
} /* for(;;) */
} else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
@ -579,18 +626,10 @@ int cpu_exec(CPUState *cpu)
* Newer versions of gcc would complain about this code (-Wclobbered). */
cpu = current_cpu;
cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
x86_cpu = X86_CPU(cpu);
env = &x86_cpu->env;
#endif
#else /* buggy compiler */
/* Assert that the compiler does not smash local variables. */
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
g_assert(x86_cpu == X86_CPU(cpu));
g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
cpu->can_do_io = 1;
tb_lock_reset();


@ -76,10 +76,6 @@ void tlb_flush(CPUState *cpu, int flush_global)
tlb_debug("(%d)\n", flush_global);
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
@ -95,9 +91,6 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
CPUArchState *env = cpu->env_ptr;
tlb_debug("start\n");
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
for (;;) {
int mmu_idx = va_arg(argp, int);
@ -152,9 +145,6 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tlb_flush(cpu, 1);
return;
}
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@ -193,9 +183,6 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
va_end(argp);
return;
}
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
cpu->current_tb = NULL;
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

exec.c

@ -2087,7 +2087,7 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
int cpu_flags;
uint32_t cpu_flags;
if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise


@ -397,7 +397,7 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
uint32_t imm32;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
int current_flags = 0;
uint32_t current_flags = 0;
if (smp_cpus == 1) {
handlers = &s->rom_state.up;
@ -446,7 +446,6 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
resume_all_vcpus();
if (!kvm_enabled()) {
cs->current_tb = NULL;
tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(cs, NULL);
}


@ -76,7 +76,8 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base, int flags,
target_ulong pc, target_ulong cs_base,
uint32_t flags,
int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
@ -229,13 +230,14 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
|| defined(__sparc__) || defined(__aarch64__) \
|| defined(__s390x__) || defined(__mips__) \
|| defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif
struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
uint64_t flags; /* flags defining in which context the code was generated */
uint32_t flags; /* flags defining in which context the code was generated */
uint16_t size; /* size of target code for this block (1 <=
size <= TARGET_PAGE_SIZE) */
uint16_t icount;
@ -257,20 +259,34 @@ struct TranslationBlock {
struct TranslationBlock *page_next[2];
tb_page_addr_t page_addr[2];
/* the following data are used to directly call another TB from
the code of this one. */
uint16_t tb_next_offset[2]; /* offset of original jump target */
/* The following data are used to directly call another TB from
* the code of this one. This can be done either by emitting direct or
* indirect native jump instructions. These jumps are reset so that the TB
* just continues its execution. The TB can be linked to another one by
* setting one of the jump targets (or patching the jump instruction). Only
* two of such jumps are supported.
*/
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
uintptr_t tb_next[2]; /* address of jump generated code */
uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
/* list of TBs jumping to this one. This is a circular list using
the two least significant bits of the pointers to tell what is
the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
jmp_first */
struct TranslationBlock *jmp_next[2];
struct TranslationBlock *jmp_first;
/* Each TB has an associated circular list of TBs jumping to this one.
* jmp_list_first points to the first TB jumping to this one.
* jmp_list_next is used to point to the next TB in a list.
* Since each TB can have two jumps, it can participate in two lists.
* jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
* TranslationBlock structure, but the two least significant bits of
* them are used to encode which data field of the pointed TB should
* be used to traverse the list further from that TB:
* 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
* In other words, 0/1 tells which jump is used in the pointed TB,
* and 2 means that this is a pointer back to the target TB of this list.
*/
uintptr_t jmp_list_next[2];
uintptr_t jmp_list_first;
};
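/* Minimal sketch, not part of the patch, of how a jmp_list entry described
 * above can be decoded.  The helper names are invented for illustration;
 * the real traversal is done in tb_remove_from_jmp_list() and
 * tb_jmp_unlink():
 *
 *     static inline TranslationBlock *jmp_list_entry_tb(uintptr_t entry)
 *     {
 *         return (TranslationBlock *)(entry & ~3);   strip the two tag bits
 *     }
 *
 *     static inline int jmp_list_entry_tag(uintptr_t entry)
 *     {
 *         return entry & 3;   0/1 => jmp_list_next[n], 2 => jmp_list_first
 *     }
 */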
#include "qemu/thread.h"
@ -288,8 +304,6 @@ struct TBContext {
/* statistics */
int tb_flush_count;
int tb_phys_invalidate_count;
int tb_invalidated_flag;
};
void tb_free(TranslationBlock *tb);
@ -302,7 +316,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
*(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
/* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
@ -312,7 +326,7 @@ void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
/* no need to flush icache explicitly */
}
#elif defined(__s390x__)
@ -320,36 +334,15 @@ static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
intptr_t disp = addr - (jmp_addr - 2);
stl_be_p((void*)jmp_addr, disp / 2);
atomic_set((int32_t *)jmp_addr, disp / 2);
/* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
register unsigned long _beg __asm ("a1");
register unsigned long _end __asm ("a2");
register unsigned long _flg __asm ("a3");
#endif
/* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
*(uint32_t *)jmp_addr =
(*(uint32_t *)jmp_addr & ~0xffffff)
| (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
#if QEMU_GNUC_PREREQ(4, 1)
__builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
/* flush icache */
_beg = jmp_addr;
_end = jmp_addr + 4;
_flg = 0;
__asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 arm_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
@ -359,7 +352,7 @@ void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, uintptr_t addr)
{
uint16_t offset = tb->tb_jmp_offset[n];
uint16_t offset = tb->jmp_insn_offset[n];
tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}
@ -369,7 +362,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, uintptr_t addr)
{
tb->tb_next[n] = addr;
tb->jmp_target_addr[n] = addr;
}
#endif
@ -377,20 +370,23 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next)
{
/* NOTE: this test is only needed for thread safety */
if (!tb->jmp_next[n]) {
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"Linking TBs %p [" TARGET_FMT_lx
"] index %d -> %p [" TARGET_FMT_lx "]\n",
tb->tc_ptr, tb->pc, n,
tb_next->tc_ptr, tb_next->pc);
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
/* add in TB jmp circular list */
tb->jmp_next[n] = tb_next->jmp_first;
tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
if (tb->jmp_list_next[n]) {
/* Another thread has already done this while we were
* outside of the lock; nothing to do in this case */
return;
}
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"Linking TBs %p [" TARGET_FMT_lx
"] index %d -> %p [" TARGET_FMT_lx "]\n",
tb->tc_ptr, tb->pc, n,
tb_next->tc_ptr, tb_next->pc);
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
/* add in TB jmp circular list */
tb->jmp_list_next[n] = tb_next->jmp_list_first;
tb_next->jmp_list_first = (uintptr_t)tb | n;
}
/* GETRA is the true target of the return instruction that we'll execute,


@ -158,6 +158,20 @@ extern int daemon(int, int);
/* Round number up to multiple */
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
/* Check if n is a multiple of m */
#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0)
/* n-byte align pointer down */
#define QEMU_ALIGN_PTR_DOWN(p, n) \
((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n)))
/* n-byte align pointer up */
#define QEMU_ALIGN_PTR_UP(p, n) \
((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n)))
/* Check if pointer p is n-bytes aligned */
#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
#ifndef ROUND_UP
#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d))
#endif
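/* Usage sketch for the new pointer alignment helpers above (illustrative,
 * not from the patch), for a pointer value of 0x1005:
 *
 *     uint8_t *p = (uint8_t *)0x1005;
 *     QEMU_ALIGN_PTR_DOWN(p, 4)    => (uint8_t *)0x1004
 *     QEMU_ALIGN_PTR_UP(p, 4)      => (uint8_t *)0x1008
 *     QEMU_PTR_IS_ALIGNED(p, 4)    => false, since 0x1005 % 4 != 0
 */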


@ -238,6 +238,7 @@ struct kvm_run;
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
* CPU and return to its top level loop.
* @tb_flushed: Indicates the translation buffer has been flushed.
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
* @icount_decr: Number of cycles left, with interrupt flag in high bit.
@ -252,7 +253,6 @@ struct kvm_run;
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
* @env_ptr: Pointer to subclass-specific CPUArchState field.
* @current_tb: Currently executing TB.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
@ -289,6 +289,7 @@ struct CPUState {
bool stopped;
bool crash_occurred;
bool exit_request;
bool tb_flushed;
uint32_t interrupt_request;
int singlestep_enabled;
int64_t icount_extra;
@ -303,7 +304,6 @@ struct CPUState {
MemoryRegion *memory;
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs;
int gdb_num_regs;


@ -254,7 +254,6 @@ static void cpu_common_reset(CPUState *cpu)
}
cpu->interrupt_request = 0;
cpu->current_tb = NULL;
cpu->halted = 0;
cpu->mem_io_pc = 0;
cpu->mem_io_vaddr = 0;


@ -465,7 +465,7 @@ enum {
};
static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
target_ulong *cs_base, int *pflags)
target_ulong *cs_base, uint32_t *pflags)
{
int flags = 0;


@ -460,12 +460,16 @@ static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
|| ctx->singlestep_enabled || singlestep) {
return false;
}
#ifndef CONFIG_USER_ONLY
/* If the destination is in the superpage, the page perms can't change. */
if (in_superpage(ctx, dest)) {
return true;
}
/* Check for the dest on the same page as the start of the TB. */
return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
return true;
#endif
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)


@ -2117,7 +2117,7 @@ static inline bool arm_cpu_bswap_data(CPUARMState *env)
#endif
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
if (is_a64(env)) {
*pc = env->pc;


@ -274,10 +274,12 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
return false;
}
#ifndef CONFIG_USER_ONLY
/* Only link tbs from inside the same guest page */
if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
return false;
}
#endif
return true;
}


@ -4049,15 +4049,22 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 0;
}
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = s->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(s, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(s, dest);
tcg_gen_exit_tb(0);


@ -249,7 +249,7 @@ int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -520,14 +520,22 @@ static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
gen_set_label(l1);
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
(dc->ppc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = dc->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(0);


@ -1269,7 +1269,7 @@ void tcg_x86_init(void);
#include "exec/exec-all.h"
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;


@ -2085,20 +2085,25 @@ static inline int insn_const_size(TCGMemOp ot)
}
}
static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
{
#ifndef CONFIG_USER_ONLY
return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
(pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
TranslationBlock *tb;
target_ulong pc;
target_ulong pc = s->cs_base + eip;
pc = s->cs_base + eip;
tb = s->tb;
/* NOTE: we handle the case where the TB spans two pages here */
if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
(pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
if (use_goto_tb(s, pc)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
gen_jmp_im(eip);
tcg_gen_exit_tb((uintptr_t)tb + tb_num);
tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
/* jump to another page: currently not optimized */
gen_jmp_im(eip);
@ -8178,7 +8183,7 @@ void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
CPUState *cs = CPU(cpu);
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
uint64_t flags;
uint32_t flags;
target_ulong pc_start;
target_ulong cs_base;
int num_insns;


@ -226,7 +226,7 @@ int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -133,16 +133,25 @@ static inline void t_gen_illegal_insn(DisasContext *dc)
gen_helper_ill(cpu_env);
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
if (unlikely(dc->singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = dc->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
likely(!dc->singlestep_enabled)) {
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {


@ -230,7 +230,7 @@ int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -852,19 +852,25 @@ static inline void gen_addr_fault(DisasContext *s)
} \
} while (0)
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
(s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
/* Generate a jump to an immediate address. */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
TranslationBlock *tb;
tb = s->tb;
if (unlikely(s->singlestep_enabled)) {
gen_exception(s, dest, EXCP_DEBUG);
} else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
(s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
} else if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(QREG_PC, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_jmp_im(s, dest);
tcg_gen_exit_tb(0);


@ -322,7 +322,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->sregs[SR_PC];
*cs_base = 0;


@ -124,14 +124,21 @@ static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
dc->is_jmp = DISAS_UPDATE;
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = dc->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb(0);


@ -839,7 +839,7 @@ static inline void restore_pamask(CPUMIPSState *env)
}
static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->active_tc.PC;
*cs_base = 0;


@ -4191,15 +4191,25 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
tcg_temp_free(t1);
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
if (unlikely(ctx->singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = ctx->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
likely(!ctx->singlestep_enabled)) {
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
gen_save_pc(dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
gen_save_pc(dest);
if (ctx->singlestep_enabled) {


@ -132,7 +132,7 @@ static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
#include "exec/exec-all.h"
static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -121,17 +121,26 @@ void moxie_translate_init(void)
done_init = 1;
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
if (unlikely(ctx->singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx,
int n, target_ulong dest)
{
TranslationBlock *tb;
tb = ctx->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
!ctx->singlestep_enabled) {
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled) {


@ -392,7 +392,7 @@ int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -190,15 +190,25 @@ static void check_ov64s(DisasContext *dc)
}
#endif*/
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
if (unlikely(dc->singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = dc->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
likely(!dc->singlestep_enabled)) {
if (use_goto_tb(dc, dest)) {
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_goto_tb(n);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {


@ -2303,7 +2303,7 @@ static inline void cpu_write_xer(CPUPPCState *env, target_ulong xer)
}
static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->nip;
*cs_base = 0;


@ -3822,19 +3822,29 @@ static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
#endif
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
if (unlikely(ctx->singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
/*** Branch ***/
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = ctx->tb;
if (NARROW_MODE(ctx)) {
dest = (uint32_t) dest;
}
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
likely(!ctx->singlestep_enabled)) {
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
if (unlikely(ctx->singlestep_enabled)) {


@ -338,7 +338,7 @@ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
}
static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->psw.addr;
*cs_base = 0;


@ -608,12 +608,17 @@ static void gen_op_calc_cc(DisasContext *s)
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
/* NOTE: we handle the case where the TB spans two pages here */
return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
|| (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
&& !s->singlestep_enabled
&& !(s->tb->cflags & CF_LAST_IO)
&& !(s->tb->flags & FLAG_MASK_PER));
if (unlikely(s->singlestep_enabled) ||
(s->tb->cflags & CF_LAST_IO) ||
(s->tb->flags & FLAG_MASK_PER)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
(dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void account_noninline_branch(DisasContext *s, int cc_op)


@ -347,7 +347,7 @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
}
static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -205,17 +205,26 @@ static void gen_write_sr(TCGv src)
tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
TranslationBlock *tb;
tb = ctx->tb;
if (unlikely(ctx->singlestep_enabled)) {
return false;
}
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
!ctx->singlestep_enabled) {
#ifndef CONFIG_USER_ONLY
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
if (use_goto_tb(ctx, dest)) {
/* Use a direct jump if in same page and singlestep not enabled */
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled)


@ -688,7 +688,7 @@ trap_state* cpu_tsptr(CPUSPARCState* env);
#define TB_FLAG_AM_ENABLED (1 << 5)
static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = env->npc;


@ -303,20 +303,30 @@ static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
}
}
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
target_ulong npc)
{
if (unlikely(s->singlestep)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
(npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
target_ulong pc, target_ulong npc)
{
TranslationBlock *tb;
tb = s->tb;
if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
(npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
!s->singlestep) {
if (use_goto_tb(s, pc, npc)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
tcg_gen_exit_tb((uintptr_t)tb + tb_num);
tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
/* jump to another page: currently not optimized */
tcg_gen_movi_tl(cpu_pc, pc);


@ -169,7 +169,7 @@ TileGXCPU *cpu_tilegx_init(const char *cpu_model);
#define cpu_signal_handler cpu_tilegx_signal_handler
static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;


@ -377,7 +377,7 @@ void tricore_tcg_init(void);
int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc);
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->PC;
*cs_base = 0;


@ -3236,15 +3236,25 @@ static inline void gen_save_pc(target_ulong pc)
tcg_gen_movi_tl(cpu_PC, pc);
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
if (unlikely(ctx->singlestep_enabled)) {
return false;
}
#ifndef CONFIG_USER_ONLY
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
TranslationBlock *tb;
tb = ctx->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
likely(!ctx->singlestep_enabled)) {
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
gen_save_pc(dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
gen_save_pc(dest);
if (ctx->singlestep_enabled) {


@ -144,7 +144,7 @@ UniCore32CPU *uc32_cpu_init(const char *cpu_model);
#define cpu_init(cpu_model) CPU(uc32_cpu_init(cpu_model))
static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->regs[31];
*cs_base = 0;


@ -1089,15 +1089,21 @@ static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t i
}
}
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
TranslationBlock *tb;
tb = s->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(dest);
tcg_gen_exit_tb((uintptr_t)tb + n);
tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(dest);
tcg_gen_exit_tb(0);


@ -507,7 +507,7 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
#define XTENSA_TBFLAG_WINDOW_SHIFT 15
static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
target_ulong *cs_base, uint32_t *flags)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));


@ -418,9 +418,11 @@ static void gen_jump(DisasContext *dc, TCGv dest)
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
#ifndef CONFIG_USER_ONLY
if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
#endif
gen_jump_slot(dc, tmp, slot);
tcg_temp_free(tmp);
}
@ -446,9 +448,11 @@ static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
#ifndef CONFIG_USER_ONLY
if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
#endif
gen_callw_slot(dc, callinc, tmp, slot);
tcg_temp_free(tmp);
}


@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
*code_ptr = deposit32(*code_ptr, 0, 26, offset);
}
static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
tcg_insn_unit *target)
{
ptrdiff_t offset = target - code_ptr;
tcg_insn_unit insn;
tcg_debug_assert(offset == sextract64(offset, 0, 26));
/* read instruction, mask away previous PC_REL26 parameter contents,
set the proper offset, then write back the instruction. */
insn = atomic_read(code_ptr);
atomic_set(code_ptr, deposit32(insn, 0, 26, offset));
}
static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
ptrdiff_t offset = target - code_ptr;
@ -835,7 +847,7 @@ void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
tcg_insn_unit *target = (tcg_insn_unit *)addr;
reloc_pc26(code_ptr, target);
reloc_pc26_atomic(code_ptr, target);
flush_icache_range(jmp_addr, jmp_addr + 4);
}
@ -1294,12 +1306,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
#ifndef USE_DIRECT_JUMP
#error "USE_DIRECT_JUMP required for aarch64"
#endif
tcg_debug_assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
s->tb_jmp_offset[a0] = tcg_current_code_size(s);
/* consistency for USE_DIRECT_JUMP */
tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* actual branch destination will be patched by
aarch64_tb_set_jmp_target later, beware retranslation. */
tcg_out_goto_noaddr(s);
s->tb_next_offset[a0] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:


@ -121,6 +121,14 @@ static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
*code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
}
static inline void reloc_pc24_atomic(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
tcg_insn_unit insn = atomic_read(code_ptr);
tcg_debug_assert(offset == sextract32(offset, 0, 24));
atomic_set(code_ptr, deposit32(insn, 0, 24, offset));
}
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
@ -1038,6 +1046,16 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
}
}
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
tcg_insn_unit *target = (tcg_insn_unit *)addr;
/* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
reloc_pc24_atomic(code_ptr, target);
flush_icache_range(jmp_addr, jmp_addr + 4);
}
static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
if (l->has_value) {
@ -1647,17 +1665,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_goto(s, COND_AL, tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* Direct jump method */
s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
tcg_out_b_noaddr(s, COND_AL);
} else {
/* Indirect jump method */
intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
intptr_t ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
}
s->tb_next_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_goto_label(s, COND_AL, arg_label(args[0]));


@ -1123,6 +1123,21 @@ static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
tcg_out_branch(s, 0, dest);
}
static void tcg_out_nopn(TCGContext *s, int n)
{
int i;
/* Emit 1 or 2 operand size prefixes for the standard one byte nop,
* "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
* duplicate prefix, and all of the interesting recent cores can
* decode and discard the duplicates in a single cycle.
*/
tcg_debug_assert(n >= 1);
for (i = 1; i < n; ++i) {
tcg_out8(s, 0x66);
}
tcg_out8(s, 0x90);
}
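/* Illustration, not part of the patch: the byte sequences tcg_out_nopn()
 * emits are n=1: 90, n=2: 66 90, n=3: 66 66 90, i.e. a single 0x90 nop
 * preceded by n-1 operand-size prefixes.  The goto_tb case further down
 * uses this padding so that the 32-bit jump displacement lands on a 4-byte
 * boundary and can later be patched with one aligned (atomic) store.
 */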
#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
@ -1775,17 +1790,25 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_jmp(s, tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* direct jump method */
int gap;
/* jump displacement must be aligned for atomic patching;
* see if we need to add extra nops before jump
*/
gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
if (gap != 1) {
tcg_out_nopn(s, gap - 1);
}
tcg_out8(s, OPC_JMP_long); /* jmp im */
s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
(intptr_t)(s->tb_next + args[0]));
(intptr_t)(s->tb_jmp_target_addr + args[0]));
}
s->tb_next_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);


@ -881,13 +881,13 @@ static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* direct jump method */
tcg_abort();
} else {
/* indirect jump method */
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
(tcg_target_long)(s->tb_next + arg));
(tcg_target_long)(s->tb_jmp_target_addr + arg));
tcg_out_bundle(s, MmI,
tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
TCG_REG_R2, TCG_REG_R2),
@ -900,7 +900,7 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
TCG_REG_B6));
}
s->tb_next_offset[arg] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[arg] = tcg_current_code_size(s);
}
static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)

View File

@ -1397,19 +1397,19 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* direct jump method */
s->tb_jmp_offset[a0] = tcg_current_code_size(s);
s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* Avoid clobbering the address during retranslation. */
tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
} else {
/* indirect jump method */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
(uintptr_t)(s->tb_next + a0));
(uintptr_t)(s->tb_jmp_target_addr + a0));
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
}
tcg_out_nop(s);
s->tb_next_offset[a0] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
@ -1885,7 +1885,6 @@ static void tcg_target_init(TCGContext *s)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
uint32_t *ptr = (uint32_t *)jmp_addr;
*ptr = deposit32(*ptr, 0, 26, addr >> 2);
atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
flush_icache_range(jmp_addr, jmp_addr + 4);
}
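
For context on the word being patched here: a MIPS 'j' keeps target bits [27:2] in its low 26 bits and takes the remaining upper address bits from the address of its delay-slot instruction, so a direct jump can only reach targets in the same 256 MiB region. A stand-alone sketch of the field packing, with illustrative names rather than the TCG backend's:

#include <assert.h>
#include <stdint.h>

#define OPC_J_SKETCH (0x02u << 26)   /* MIPS 'j' opcode in the top six bits */

/* Sketch only: the patched instruction word for a jump to 'addr', assuming
 * 'addr' lies in the same 256 MiB region as the jump instruction itself. */
static uint32_t patch_j(uint32_t addr)
{
    return OPC_J_SKETCH | ((addr >> 2) & 0x03ffffffu);
}

int main(void)
{
    uint32_t insn = patch_j(0x40001000);

    assert((insn >> 26) == 0x02);                                 /* opcode kept */
    assert((insn & 0x03ffffffu) == ((0x40001000u >> 2) & 0x03ffffffu));
    return 0;
}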

View File

@ -1237,6 +1237,7 @@ static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
}
#ifdef __powerpc64__
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
tcg_insn_unit i1, i2;
@ -1265,11 +1266,18 @@ void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
pair = (uint64_t)i2 << 32 | i1;
#endif
/* ??? __atomic_store_8, presuming there's some way to do that
for 32-bit, otherwise this is good enough for 64-bit. */
*(uint64_t *)jmp_addr = pair;
atomic_set((uint64_t *)jmp_addr, pair);
flush_icache_range(jmp_addr, jmp_addr + 8);
}
#else
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
intptr_t diff = addr - jmp_addr;
tcg_debug_assert(in_range_b(diff));
atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
flush_icache_range(jmp_addr, jmp_addr + 4);
}
#endif
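
The new 32-bit variant can only patch an unconditional 'b' whose displacement fits the instruction's signed, word-aligned 26-bit range (roughly +/-32 MiB), which is what the in_range_b() assertion above checks. A stand-alone sketch of that range test, with an illustrative helper name:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch only: a PowerPC I-form branch holds a signed, 4-byte aligned
 * displacement of at most 26 bits (the 24-bit LI field shifted left by 2). */
static bool branch_in_range(int64_t diff)
{
    return diff >= -(1 << 25) && diff < (1 << 25) && (diff & 3) == 0;
}

int main(void)
{
    assert(branch_in_range(0x1fffffc));    /* +32 MiB - 4: reachable */
    assert(branch_in_range(-(1 << 25)));   /* -32 MiB: reachable     */
    assert(!branch_in_range(1 << 25));     /* +32 MiB: out of range  */
    assert(!branch_in_range(0x1002));      /* not word aligned       */
    return 0;
}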
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
@ -1894,17 +1902,23 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_b(s, 0, tb_ret_addr);
break;
case INDEX_op_goto_tb:
tcg_debug_assert(s->tb_jmp_offset);
/* Direct jump. Ensure the next insns are 8-byte aligned. */
tcg_debug_assert(s->tb_jmp_insn_offset);
/* Direct jump. */
#ifdef __powerpc64__
/* Ensure the next insns are 8-byte aligned. */
if ((uintptr_t)s->code_ptr & 7) {
tcg_out32(s, NOP);
}
s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
/* To be replaced by either a branch+nop or a load into TMP1. */
s->code_ptr += 2;
tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
tcg_out32(s, BCCTR | BO_ALWAYS);
s->tb_next_offset[args[0]] = tcg_current_code_size(s);
#else
/* To be replaced by a branch. */
s->code_ptr++;
#endif
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
{

View File

@ -219,6 +219,8 @@ typedef enum S390Opcode {
RX_ST = 0x50,
RX_STC = 0x42,
RX_STH = 0x40,
NOP = 0x0707,
} S390Opcode;
#ifdef CONFIG_DEBUG_TCG
@ -1715,17 +1717,24 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* branch displacement must be aligned for atomic patching;
* see if we need to add extra nop before branch
*/
if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
tcg_out16(s, NOP);
}
tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
s->code_ptr += 2;
} else {
/* load address stored at s->tb_next + args[0] */
tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
/* load address stored at s->tb_jmp_target_addr + args[0] */
tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
s->tb_jmp_target_addr + args[0]);
/* and go there */
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
}
s->tb_next_offset[args[0]] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
OP_32_64(ld8u):

View File

@ -1229,18 +1229,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* direct jump method */
s->tb_jmp_offset[a0] = tcg_current_code_size(s);
s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* Make sure to preserve links during retranslation. */
tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
} else {
/* indirect jump method */
tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
tcg_out_ld_ptr(s, TCG_REG_T1,
(uintptr_t)(s->tb_jmp_target_addr + a0));
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
}
tcg_out_nop(s);
s->tb_next_offset[a0] = tcg_current_code_size(s);
s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
@ -1647,6 +1648,6 @@ void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
the code_gen_buffer can't be larger than 2GB. */
tcg_debug_assert(disp == (int32_t)disp);
*ptr = CALL | (uint32_t)disp >> 2;
atomic_set(ptr, deposit32(CALL, 0, 30, disp >> 2));
flush_icache_range(jmp_addr, jmp_addr + 4);
}

View File

@ -753,6 +753,19 @@ static inline void tcg_gen_exit_tb(uintptr_t val)
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
/**
* tcg_gen_goto_tb() - output goto_tb TCG operation
* @idx: Direct jump slot index (0 or 1)
*
* See tcg/README for more info about this TCG operation.
*
* NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within
* the pages this TB resides in because we don't take care of direct jumps when
* address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
* static address translation, so the destination address is always valid, TBs
* are always invalidated properly, and direct jumps are reset when mapping
* changes.
*/
void tcg_gen_goto_tb(unsigned idx);
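
As a usage sketch of the rule in the comment above (not lifted from any particular front end): in softmmu mode a translator only emits goto_tb when the destination PC stays on the guest page the TB starts on. The check below is self-contained; the 4 KiB page size and the helper name are assumptions for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_BITS 12                                /* assumed 4 KiB pages */
#define SKETCH_PAGE_MASK (~(uint64_t)((1u << SKETCH_PAGE_BITS) - 1))

/* Sketch only: the softmmu safety condition for using a direct goto_tb. */
static bool can_use_goto_tb(uint64_t tb_start_pc, uint64_t dest_pc)
{
    return (tb_start_pc & SKETCH_PAGE_MASK) == (dest_pc & SKETCH_PAGE_MASK);
}

int main(void)
{
    assert(can_use_goto_tb(0x40001000, 0x40001ffc));   /* same 4 KiB page  */
    assert(!can_use_goto_tb(0x40001ffc, 0x40002000));  /* crosses the page */
    return 0;
}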
#if TARGET_LONG_BITS == 32

View File

@ -510,9 +510,9 @@ struct TCGContext {
/* goto_tb support */
tcg_insn_unit *code_buf;
uintptr_t *tb_next;
uint16_t *tb_next_offset;
uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
/* liveness analysis */
uint16_t *op_dead_args; /* for each operation, each bit tells if the
@ -925,7 +925,7 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
/**
* tcg_qemu_tb_exec:
* @env: CPUArchState * for the CPU
* @env: pointer to CPUArchState for the CPU
* @tb_ptr: address of generated code for the TB to execute
*
* Start executing code from a given translation block.
@ -936,30 +936,31 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
* which has not yet been directly linked, or an asynchronous
* event such as an interrupt needs handling.
*
* The return value is a pointer to the next TB to execute
* (if known; otherwise zero). This pointer is assumed to be
* 4-aligned, and the bottom two bits are used to return further
* information:
* Return: The return value is the value passed to the corresponding
* tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
* The value is either zero or a 4-byte aligned pointer to that TB combined
* with additional information in its two least significant bits. The
* additional information is encoded as follows:
* 0, 1: the link between this TB and the next is via the specified
* TB index (0 or 1). That is, we left the TB via (the equivalent
* of) "goto_tb <index>". The main loop uses this to determine
* how to link the TB just executed to the next.
* 2: we are using instruction counting code generation, and we
* did not start executing this TB because the instruction counter
* would hit zero midway through it. In this case the next-TB pointer
* would hit zero midway through it. In this case the pointer
* returned is the TB we were about to execute, and the caller must
* arrange to execute the remaining count of instructions.
* 3: we stopped because the CPU's exit_request flag was set
* (usually meaning that there is an interrupt that needs to be
* handled). The next-TB pointer returned is the TB we were
* about to execute when we noticed the pending exit request.
* handled). The pointer returned is the TB we were about to execute
* when we noticed the pending exit request.
*
* If the bottom two bits indicate an exit-via-index then the CPU
* state is correctly synchronised and ready for execution of the next
* TB (and in particular the guest PC is the address to execute next).
* Otherwise, we gave up on execution of this TB before it started, and
* the caller must fix up the CPU state by calling the CPU's
* synchronize_from_tb() method with the next-TB pointer we return (falling
* synchronize_from_tb() method with the TB pointer we return (falling
back to calling the CPU's set_pc method with tb->pc if no
* synchronize_from_tb() method exists).
*
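
To make the two-bit encoding in this comment concrete, here is a toy decode over the documented values 0..3, using a dummy 4-byte aligned object in place of a real TranslationBlock; it is illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    static uint32_t dummy_tb __attribute__((aligned(4)));   /* stand-in TB  */
    uintptr_t ret = (uintptr_t)&dummy_tb | 3;      /* e.g. "exit requested" */
    void *tb = (void *)(ret & ~(uintptr_t)3);      /* strip the low two bits */

    switch (ret & 3) {
    case 0:
    case 1:
        printf("left TB %p via goto_tb slot %u; may chain to the next TB\n",
               tb, (unsigned)(ret & 3));
        break;
    case 2:
        printf("icount expired before TB %p started; caller must handle it\n", tb);
        break;
    default:
        printf("exit requested while about to run TB %p\n", tb);
        break;
    }
    return 0;
}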

View File

@ -553,17 +553,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out64(s, args[0]);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
if (s->tb_jmp_insn_offset) {
/* Direct jump method. */
tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_insn_offset));
/* Align for atomic patching and thread safety */
s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* Indirect jump method. */
TODO();
}
tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
s->tb_next_offset[args[0]] = tcg_current_code_size(s);
tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_reset_offset));
s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tci_out_label(s, arg_label(args[0]));


tci.c
View File

@ -467,7 +467,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
{
long tcg_temps[CPU_TEMP_BUF_NLONGS];
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
uintptr_t next_tb = 0;
uintptr_t ret = 0;
tci_reg[TCG_AREG0] = (tcg_target_ulong)env;
tci_reg[TCG_REG_CALL_STACK] = sp_value;
@ -1085,11 +1085,14 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
/* QEMU specific operations. */
case INDEX_op_exit_tb:
next_tb = *(uint64_t *)tb_ptr;
ret = *(uint64_t *)tb_ptr;
goto exit;
break;
case INDEX_op_goto_tb:
t0 = tci_read_i32(&tb_ptr);
/* Jump address is aligned */
tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
t0 = atomic_read((int32_t *)tb_ptr);
tb_ptr += sizeof(int32_t);
tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr += (int32_t)t0;
continue;
@ -1240,5 +1243,5 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_assert(tb_ptr == old_code_ptr + op_size);
}
exit:
return next_tb;
return ret;
}
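
The alignment step added to the goto_tb bytecode handler relies on QEMU_ALIGN_PTR_UP(tb_ptr, 4) so the 32-bit offset can be read (and patched elsewhere) with a naturally aligned access. The underlying power-of-two align-up arithmetic, as a stand-alone sketch:

#include <assert.h>
#include <stdint.h>

/* Sketch only: round 'p' up to the next multiple of 'n' (n a power of two). */
static uintptr_t align_up(uintptr_t p, uintptr_t n)
{
    return (p + n - 1) & ~(n - 1);
}

int main(void)
{
    assert(align_up(0x1000, 4) == 0x1000);   /* already aligned: unchanged */
    assert(align_up(0x1001, 4) == 0x1004);
    assert(align_up(0x1003, 4) == 0x1004);
    return 0;
}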

View File

@ -1614,7 +1614,7 @@ kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d
# cpu-exec.c
disable exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
disable exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
disable exec_tb_exit(void *next_tb, unsigned int flags) "tb:%p flags=%x"
disable exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=%x"
# translate-all.c
translate_block(void *tb, uintptr_t pc, uint8_t *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"

View File

@ -72,11 +72,12 @@
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
unsigned int code_write_count;
unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
#else
unsigned long flags;
#endif
} PageDesc;
@ -153,8 +154,6 @@ void tb_lock_reset(void)
#endif
}
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
@ -306,7 +305,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
@ -464,6 +462,8 @@ static inline PageDesc *page_find(tb_page_addr_t index)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
@ -505,7 +505,6 @@ static inline size_t size_code_gen_buffer(size_t tb_size)
if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
tb_size = MAX_CODE_GEN_BUFFER_SIZE;
}
tcg_ctx.code_gen_buffer_size = tb_size;
return tb_size;
}
@ -514,7 +513,7 @@ static inline size_t size_code_gen_buffer(size_t tb_size)
that the buffer not cross a 256MB boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
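
The rewritten mask makes this 256 MiB-boundary test work for code buffers mapped above 4 GiB as well: XOR-ing the first and one-past-last byte address and keeping only the bits above bit 27 is non-zero exactly when the two ends fall in different 256 MiB-aligned regions. A stand-alone sketch of the same test on plain integers:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: same check as cross_256mb(), on integer addresses. */
static bool crosses_256mb(uintptr_t addr, size_t size)
{
    return ((addr ^ (addr + size)) & ~(uintptr_t)0x0fffffff) != 0;
}

int main(void)
{
    assert(!crosses_256mb(0x10000000, 0x0fffffff));  /* stays below 0x20000000 */
    assert(crosses_256mb(0x1ffffff0, 0x20));         /* straddles 0x20000000   */
    return 0;
}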
/* We weren't able to allocate a buffer without crossing that boundary,
@ -522,7 +521,7 @@ static inline bool cross_256mb(void *addr, size_t size)
Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
size_t size2 = buf1 + size1 - buf2;
size1 = buf2 - buf1;
@ -683,11 +682,11 @@ static inline void *alloc_code_gen_buffer(void)
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
munmap(buf, size);
munmap(buf, size + qemu_real_host_page_size);
break;
}
/* Failure. Work with what we had. */
munmap(buf2, size);
munmap(buf2, size + qemu_real_host_page_size);
/* fallthru */
default:
/* Split the original buffer. Free the smaller half. */
@ -784,9 +783,11 @@ void tb_free(TranslationBlock *tb)
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
g_free(p->code_bitmap);
p->code_bitmap = NULL;
p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
@ -841,6 +842,7 @@ void tb_flush(CPUState *cpu)
CPU_FOREACH(cpu) {
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
cpu->tb_flushed = true;
}
memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
@ -925,32 +927,33 @@ static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
}
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
TranslationBlock *tb1, **ptb;
TranslationBlock *tb1;
uintptr_t *ptb, ntb;
unsigned int n1;
ptb = &tb->jmp_next[n];
tb1 = *ptb;
if (tb1) {
ptb = &tb->jmp_list_next[n];
if (*ptb) {
/* find tb(n) in circular list */
for (;;) {
tb1 = *ptb;
n1 = (uintptr_t)tb1 & 3;
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
ntb = *ptb;
n1 = ntb & 3;
tb1 = (TranslationBlock *)(ntb & ~3);
if (n1 == n && tb1 == tb) {
break;
}
if (n1 == 2) {
ptb = &tb1->jmp_first;
ptb = &tb1->jmp_list_first;
} else {
ptb = &tb1->jmp_next[n1];
ptb = &tb1->jmp_list_next[n1];
}
}
/* now we can suppress tb(n) from the list */
*ptb = tb->jmp_next[n];
*ptb = tb->jmp_list_next[n];
tb->jmp_next[n] = NULL;
tb->jmp_list_next[n] = (uintptr_t)NULL;
}
}
@ -958,7 +961,29 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n)
another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
TranslationBlock *tb1;
uintptr_t *ptb, ntb;
unsigned int n1;
ptb = &tb->jmp_list_first;
for (;;) {
ntb = *ptb;
n1 = ntb & 3;
tb1 = (TranslationBlock *)(ntb & ~3);
if (n1 == 2) {
break;
}
tb_reset_jump(tb1, n1);
*ptb = tb1->jmp_list_next[n1];
tb1->jmp_list_next[n1] = (uintptr_t)NULL;
}
}
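
The jmp_list_* words walked by tb_remove_from_jmp_list() and tb_jmp_unlink() form a tagged, circular singly linked list: each uintptr_t carries a TranslationBlock pointer in its upper bits and, in the low two bits, either the jump-slot index (0 or 1) inside that TB or the value 2 marking the list head. A minimal stand-alone sketch of just the pack/unpack convention, on a dummy structure rather than the real TranslationBlock:

#include <assert.h>
#include <stdint.h>

/* Sketch only: stand-in for the jmp_list fields of TranslationBlock. */
struct fake_tb {
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};

static uintptr_t jmp_list_entry(struct fake_tb *tb, unsigned tag)
{
    return (uintptr_t)tb | tag;       /* tag: 0/1 = jump slot, 2 = list head */
}

int main(void)
{
    static struct fake_tb tb;
    uintptr_t e = jmp_list_entry(&tb, 1);

    assert(((uintptr_t)&tb & 3) == 0);                     /* 4-byte aligned  */
    assert((struct fake_tb *)(e & ~(uintptr_t)3) == &tb);  /* recover the TB  */
    assert((e & 3) == 1);                                  /* recover the tag */

    tb.jmp_list_first = jmp_list_entry(&tb, 2);            /* empty list head */
    assert((tb.jmp_list_first & 3) == 2);
    return 0;
}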
/* invalidate one TB */
@ -966,9 +991,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
CPUState *cpu;
PageDesc *p;
unsigned int h, n1;
unsigned int h;
tb_page_addr_t phys_pc;
TranslationBlock *tb1, *tb2;
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
@ -987,8 +1011,6 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
invalidate_page_bitmap(p);
}
tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
@ -998,27 +1020,16 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
}
/* suppress this TB from the two jump lists */
tb_jmp_remove(tb, 0);
tb_jmp_remove(tb, 1);
tb_remove_from_jmp_list(tb, 0);
tb_remove_from_jmp_list(tb, 1);
/* suppress any remaining jumps to this TB */
tb1 = tb->jmp_first;
for (;;) {
n1 = (uintptr_t)tb1 & 3;
if (n1 == 2) {
break;
}
tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
tb2 = tb1->jmp_next[n1];
tb_reset_jump(tb1, n1);
tb1->jmp_next[n1] = NULL;
tb1 = tb2;
}
tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
tb_jmp_unlink(tb);
tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
int n, tb_start, tb_end;
@ -1047,362 +1058,6 @@ static void build_page_bitmap(PageDesc *p)
tb = tb->page_next[n];
}
}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
int flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
tb_page_addr_t phys_pc, phys_page2;
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
phys_pc = get_page_addr_code(env, pc);
if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
cflags |= CF_USE_ICOUNT;
}
tb = tb_alloc(pc);
if (unlikely(!tb)) {
buffer_overflow:
/* flush must be done */
tb_flush(cpu);
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
/* Don't forget to invalidate previous TB info. */
tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
}
gen_code_buf = tcg_ctx.code_gen_ptr;
tb->tc_ptr = gen_code_buf;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
exceptions */
ti = profile_getclock();
#endif
tcg_func_start(&tcg_ctx);
gen_intermediate_code(env, tb);
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
tb->tb_next_offset[0] = 0xffff;
tb->tb_next_offset[1] = 0xffff;
tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
tcg_ctx.tb_next = NULL;
#else
tcg_ctx.tb_jmp_offset = NULL;
tcg_ctx.tb_next = tb->tb_next;
#endif
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count++;
tcg_ctx.interm_time += profile_getclock() - ti;
tcg_ctx.code_time -= profile_getclock();
#endif
/* ??? Overflow could be handled better here. In particular, we
don't need to re-do gen_intermediate_code, nor should we re-do
the tcg optimization currently hidden inside tcg_gen_code. All
that should be required is to flush the TBs, allocate a new TB,
re-initialize it per above, and re-do the actual code generation. */
gen_code_size = tcg_gen_code(&tcg_ctx, tb);
if (unlikely(gen_code_size < 0)) {
goto buffer_overflow;
}
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
goto buffer_overflow;
}
#ifdef CONFIG_PROFILER
tcg_ctx.code_time += profile_getclock();
tcg_ctx.code_in_len += tb->size;
tcg_ctx.code_out_len += gen_code_size;
tcg_ctx.search_out_len += search_size;
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(tb->pc)) {
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
qemu_log_flush();
}
#endif
tcg_ctx.code_gen_ptr = (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
tb_link_page(tb, phys_pc, phys_page2);
return tb;
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end may refer to *different* physical pages.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
* Called with mmap_lock held for user-mode emulation
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
while (start < end) {
tb_invalidate_phys_page_range(start, end, 0);
start &= TARGET_PAGE_MASK;
start += TARGET_PAGE_SIZE;
}
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end must refer to the *same* physical page.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
* Called with mmap_lock held for user-mode emulation
*/
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
{
TranslationBlock *tb, *tb_next, *saved_tb;
CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
CPUArchState *env = NULL;
#endif
tb_page_addr_t tb_start, tb_end;
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
int current_tb_not_found = is_cpu_write_access;
TranslationBlock *current_tb = NULL;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}
#if defined(TARGET_HAS_PRECISE_SMC)
if (cpu != NULL) {
env = cpu->env_ptr;
}
#endif
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all
the code */
tb = p->first_tb;
while (tb != NULL) {
n = (uintptr_t)tb & 3;
tb = (TranslationBlock *)((uintptr_t)tb & ~3);
tb_next = tb->page_next[n];
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
tb_end = tb_start + tb->size;
} else {
tb_start = tb->page_addr[1];
tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
}
if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
if (cpu->mem_io_pc) {
/* now we have a real cpu fault */
current_tb = tb_find_pc(cpu->mem_io_pc);
}
}
if (current_tb == tb &&
(current_tb->cflags & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
/* we need to do that to handle the case where a signal
occurs while doing tb_phys_invalidate() */
saved_tb = NULL;
if (cpu != NULL) {
saved_tb = cpu->current_tb;
cpu->current_tb = NULL;
}
tb_phys_invalidate(tb, -1);
if (cpu != NULL) {
cpu->current_tb = saved_tb;
if (cpu->interrupt_request && cpu->current_tb) {
cpu_interrupt(cpu, cpu->interrupt_request);
}
}
}
tb = tb_next;
}
#if !defined(CONFIG_USER_ONLY)
/* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) {
invalidate_page_bitmap(p);
tlb_unprotect_code(start);
}
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(cpu, NULL);
}
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
PageDesc *p;
#if 0
if (1) {
qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
cpu_single_env->mem_io_vaddr, len,
cpu_single_env->eip,
cpu_single_env->eip +
(intptr_t)cpu_single_env->segs[R_CS].base);
}
#endif
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
/* build code bitmap */
build_page_bitmap(p);
}
if (p->code_bitmap) {
unsigned int nr;
unsigned long b;
nr = start & ~TARGET_PAGE_MASK;
b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
if (b & ((1 << len) - 1)) {
goto do_invalidate;
}
} else {
do_invalidate:
tb_invalidate_phys_page_range(start, start + len, 1);
}
}
#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
uintptr_t pc, void *puc,
bool locked)
{
TranslationBlock *tb;
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
TranslationBlock *current_tb = NULL;
CPUState *cpu = current_cpu;
CPUArchState *env = NULL;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
int current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
return;
}
tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
current_tb = tb_find_pc(pc);
}
if (cpu != NULL) {
env = cpu->env_ptr;
}
#endif
while (tb != NULL) {
n = (uintptr_t)tb & 3;
tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(current_tb->cflags & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
cpu_restore_state_from_tb(cpu, current_tb, pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
tb = tb->page_next[n];
}
p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
if (locked) {
mmap_unlock();
}
cpu_resume_from_signal(cpu, puc);
}
#endif
}
#endif
/* add the tb in the target page and protect it if necessary
@ -1488,23 +1143,370 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb->page_addr[1] = -1;
}
tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
tb->jmp_next[0] = NULL;
tb->jmp_next[1] = NULL;
/* init original jump addresses */
if (tb->tb_next_offset[0] != 0xffff) {
tb_reset_jump(tb, 0);
}
if (tb->tb_next_offset[1] != 0xffff) {
tb_reset_jump(tb, 1);
}
#ifdef DEBUG_TB_CHECK
tb_page_check();
#endif
}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
tb_page_addr_t phys_pc, phys_page2;
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
phys_pc = get_page_addr_code(env, pc);
if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
cflags |= CF_USE_ICOUNT;
}
tb = tb_alloc(pc);
if (unlikely(!tb)) {
buffer_overflow:
/* flush must be done */
tb_flush(cpu);
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
}
gen_code_buf = tcg_ctx.code_gen_ptr;
tb->tc_ptr = gen_code_buf;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
exceptions */
ti = profile_getclock();
#endif
tcg_func_start(&tcg_ctx);
gen_intermediate_code(env, tb);
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
tcg_ctx.tb_jmp_target_addr = NULL;
#else
tcg_ctx.tb_jmp_insn_offset = NULL;
tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count++;
tcg_ctx.interm_time += profile_getclock() - ti;
tcg_ctx.code_time -= profile_getclock();
#endif
/* ??? Overflow could be handled better here. In particular, we
don't need to re-do gen_intermediate_code, nor should we re-do
the tcg optimization currently hidden inside tcg_gen_code. All
that should be required is to flush the TBs, allocate a new TB,
re-initialize it per above, and re-do the actual code generation. */
gen_code_size = tcg_gen_code(&tcg_ctx, tb);
if (unlikely(gen_code_size < 0)) {
goto buffer_overflow;
}
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
goto buffer_overflow;
}
#ifdef CONFIG_PROFILER
tcg_ctx.code_time += profile_getclock();
tcg_ctx.code_in_len += tb->size;
tcg_ctx.code_out_len += gen_code_size;
tcg_ctx.search_out_len += search_size;
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(tb->pc)) {
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
qemu_log_flush();
}
#endif
tcg_ctx.code_gen_ptr = (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
/* init jump list */
assert(((uintptr_t)tb & 3) == 0);
tb->jmp_list_first = (uintptr_t)tb | 2;
tb->jmp_list_next[0] = (uintptr_t)NULL;
tb->jmp_list_next[1] = (uintptr_t)NULL;
/* init original jump addresses which have been set during tcg_gen_code() */
if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
tb_reset_jump(tb, 0);
}
if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
tb_reset_jump(tb, 1);
}
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
/* As long as consistency of the TB stuff is provided by tb_lock in user
* mode and is implicit in single-threaded softmmu emulation, no explicit
* memory barrier is required before tb_link_page() makes the TB visible
* through the physical hash table and physical page list.
*/
tb_link_page(tb, phys_pc, phys_page2);
return tb;
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end may refer to *different* physical pages.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
* Called with mmap_lock held for user-mode emulation
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
while (start < end) {
tb_invalidate_phys_page_range(start, end, 0);
start &= TARGET_PAGE_MASK;
start += TARGET_PAGE_SIZE;
}
}
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end must refer to the *same* physical page.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
* Called with mmap_lock held for user-mode emulation
*/
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
{
TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
CPUState *cpu = current_cpu;
CPUArchState *env = NULL;
#endif
tb_page_addr_t tb_start, tb_end;
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
int current_tb_not_found = is_cpu_write_access;
TranslationBlock *current_tb = NULL;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}
#if defined(TARGET_HAS_PRECISE_SMC)
if (cpu != NULL) {
env = cpu->env_ptr;
}
#endif
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all
the code */
tb = p->first_tb;
while (tb != NULL) {
n = (uintptr_t)tb & 3;
tb = (TranslationBlock *)((uintptr_t)tb & ~3);
tb_next = tb->page_next[n];
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
tb_end = tb_start + tb->size;
} else {
tb_start = tb->page_addr[1];
tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
}
if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
if (cpu->mem_io_pc) {
/* now we have a real cpu fault */
current_tb = tb_find_pc(cpu->mem_io_pc);
}
}
if (current_tb == tb &&
(current_tb->cflags & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, -1);
}
tb = tb_next;
}
#if !defined(CONFIG_USER_ONLY)
/* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) {
invalidate_page_bitmap(p);
tlb_unprotect_code(start);
}
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(cpu, NULL);
}
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
PageDesc *p;
#if 0
if (1) {
qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
cpu_single_env->mem_io_vaddr, len,
cpu_single_env->eip,
cpu_single_env->eip +
(intptr_t)cpu_single_env->segs[R_CS].base);
}
#endif
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
}
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
/* build code bitmap */
build_page_bitmap(p);
}
if (p->code_bitmap) {
unsigned int nr;
unsigned long b;
nr = start & ~TARGET_PAGE_MASK;
b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
if (b & ((1 << len) - 1)) {
goto do_invalidate;
}
} else {
do_invalidate:
tb_invalidate_phys_page_range(start, start + len, 1);
}
}
#else
/* Called with mmap_lock held. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
uintptr_t pc, void *puc,
bool locked)
{
TranslationBlock *tb;
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
TranslationBlock *current_tb = NULL;
CPUState *cpu = current_cpu;
CPUArchState *env = NULL;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
uint32_t current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
return;
}
tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
current_tb = tb_find_pc(pc);
}
if (cpu != NULL) {
env = cpu->env_ptr;
}
#endif
while (tb != NULL) {
n = (uintptr_t)tb & 3;
tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(current_tb->cflags & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
cpu_restore_state_from_tb(cpu, current_tb, pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
tb = tb->page_next[n];
}
p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
if (locked) {
mmap_unlock();
}
cpu_resume_from_signal(cpu, puc);
}
#endif
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
@ -1574,7 +1576,7 @@ void tb_check_watchpoint(CPUState *cpu)
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
tb_page_addr_t addr;
int flags;
uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
@ -1593,7 +1595,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
TranslationBlock *tb;
uint32_t n, cflags;
target_ulong pc, cs_base;
uint64_t flags;
uint32_t flags;
tb = tb_find_pc(retaddr);
if (!tb) {
@ -1689,9 +1691,9 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
if (tb->page_addr[1] != -1) {
cross_page++;
}
if (tb->tb_next_offset[0] != 0xffff) {
if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
direct_jmp_count++;
if (tb->tb_next_offset[1] != 0xffff) {
if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
direct_jmp2_count++;
}
}