mirror of https://github.com/xemu-project/xemu.git
tcg: Define and use new tlb_hit() and tlb_hit_page() functions
The condition to check whether an address has hit against a particular
TLB entry is not completely trivial. We do this in various places, and
in fact in one place (get_page_addr_code()) we have got the condition
wrong. Abstract it out into new tlb_hit() and tlb_hit_page() inline
functions (one for a known-page-aligned address and one for an
arbitrary address), and use them in all the places where we had the
condition correct.

This is a no-behaviour-change patch; we leave fixing the buggy code in
get_page_addr_code() to a subsequent patch.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20180629162122.19376-2-peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Parent: a688e73ba8
Commit: 334692bce7
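
As a quick sanity check on the "no-behaviour-change" claim in the commit message, the following standalone C sketch (not part of the patch; it uses assumed stand-in values for TARGET_PAGE_BITS, TARGET_PAGE_MASK and TLB_INVALID_MASK rather than the real QEMU definitions) compares the open-coded hit condition used at the lookup sites with the new tlb_hit() helper and asserts that they agree:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in definitions (assumptions for this sketch, not the real QEMU values). */
typedef uint64_t target_ulong;            /* assume 64-bit guest addresses */
#define TARGET_PAGE_BITS   12             /* assume 4 KiB pages */
#define TARGET_PAGE_MASK   (~(target_ulong)((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK   (1u << 3)      /* assume a low flag bit in tlb_addr */

/* The new helpers, as introduced by this patch. */
static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

/* The open-coded hit condition the lookup sites used before the patch. */
static inline bool open_coded_hit(target_ulong tlb_addr, target_ulong addr)
{
    return (addr & TARGET_PAGE_MASK)
           == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

int main(void)
{
    /* Representative cases: same page, different page, invalidated entry. */
    target_ulong addrs[]     = { 0x1234, 0x1fff, 0x2000, 0xabcd0123 };
    target_ulong tlb_addrs[] = { 0x1000, 0x2000, 0x1000 | TLB_INVALID_MASK };

    for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
        for (size_t j = 0; j < sizeof(tlb_addrs) / sizeof(tlb_addrs[0]); j++) {
            assert(tlb_hit(tlb_addrs[j], addrs[i])
                   == open_coded_hit(tlb_addrs[j], addrs[i]));
        }
    }
    printf("tlb_hit() matches the open-coded condition for all tested cases\n");
    return 0;
}

The two expressions are algebraically identical: tlb_hit() simply page-aligns the address before delegating to tlb_hit_page(), so the assertion holds for every input.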
@@ -239,12 +239,9 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->addr_read &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (tlb_hit_page(tlb_entry->addr_read, addr) ||
+        tlb_hit_page(tlb_entry->addr_write, addr) ||
+        tlb_hit_page(tlb_entry->addr_code, addr)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
@@ -1046,8 +1043,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -1091,8 +1087,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Check TLB entry and enforce page permissions. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -123,8 +123,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -191,8 +190,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -286,8 +284,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
    /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -322,7 +319,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -364,8 +361,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
    /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -400,7 +396,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -339,6 +339,29 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
                         | TLB_RECHECK)
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
 #endif /* !CONFIG_USER_ONLY */
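
To make the masking in these new helpers concrete, here is a short worked sketch (standalone, using the same assumed stand-in constants as the earlier sketch, not the real QEMU values). Because the comparison masks tlb_addr with TARGET_PAGE_MASK | TLB_INVALID_MASK, an entry whose TLB_INVALID_MASK bit is set can never compare equal to a page-aligned address, so invalidated entries always miss:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in definitions (assumptions for this sketch, not the real QEMU values). */
typedef uint64_t target_ulong;
#define TARGET_PAGE_BITS   12             /* assume 4 KiB pages */
#define TARGET_PAGE_MASK   (~(target_ulong)((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK   (1u << 3)      /* assume a low flag bit in tlb_addr */

static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

int main(void)
{
    target_ulong valid_entry   = 0x4000;                     /* maps page 0x4000 */
    target_ulong invalid_entry = 0x4000 | TLB_INVALID_MASK;  /* same page, invalidated */

    printf("%d\n", tlb_hit(valid_entry, 0x4abc));    /* 1: address is on page 0x4000 */
    printf("%d\n", tlb_hit(valid_entry, 0x5abc));    /* 0: address is on page 0x5000 */
    printf("%d\n", tlb_hit(invalid_entry, 0x4abc));  /* 0: the invalid bit survives the
                                                      * mask, so the comparison fails
                                                      * for every page-aligned address */
    return 0;
}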
@@ -422,8 +422,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
         g_assert_not_reached();
     }
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         return NULL;
     }