mirror of https://github.com/xemu-project/xemu.git
accel/tcg: Check whether TLB entry is RAM consistently with how we set it up
We set up TLB entries in tlb_set_page_with_attrs(), where we have some logic for determining whether the TLB entry is considered to be RAM-backed, and thus has a valid addend field. When we look at the TLB entry in get_page_addr_code(), we use different logic for determining whether to treat the page as RAM-backed and use the addend field. This is confusing, and in fact buggy, because the code in tlb_set_page_with_attrs() correctly decides that rom_device memory regions not in romd mode are not RAM-backed, but the code in get_page_addr_code() thinks they are RAM-backed. This typically results in "Bad ram pointer" assertion if the guest tries to execute from such a memory region. Fix this by making get_page_addr_code() just look at the TLB_MMIO bit in the code_address field of the TLB, which tlb_set_page_with_attrs() sets if and only if the addend field is not valid for code execution. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org> Message-id: 20180713150945.12348-1-peter.maydell@linaro.org
This commit is contained in:
parent
d4b6275df3
commit
55a7cb144d
|
@@ -926,10 +926,6 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
|
||||||
{
|
{
|
||||||
int mmu_idx, index;
|
int mmu_idx, index;
|
||||||
void *p;
|
void *p;
|
||||||
MemoryRegion *mr;
|
|
||||||
MemoryRegionSection *section;
|
|
||||||
CPUState *cpu = ENV_GET_CPU(env);
|
|
||||||
CPUIOTLBEntry *iotlbentry;
|
|
||||||
|
|
||||||
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
|
||||||
mmu_idx = cpu_mmu_index(env, true);
|
mmu_idx = cpu_mmu_index(env, true);
|
||||||
|
@@ -940,28 +936,19 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
|
||||||
assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
|
assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
|
if (unlikely(env->tlb_table[mmu_idx][index].addr_code &
|
||||||
|
(TLB_RECHECK | TLB_MMIO))) {
|
||||||
/*
|
/*
|
||||||
* This is a TLB_RECHECK access, where the MMU protection
|
* Return -1 if we can't translate and execute from an entire
|
||||||
* covers a smaller range than a target page. Return -1 to
|
* page of RAM here, which will cause us to execute by loading
|
||||||
* indicate that we cannot simply execute from RAM here;
|
* and translating one insn at a time, without caching:
|
||||||
* we will perform the necessary repeat of the MMU check
|
* - TLB_RECHECK: means the MMU protection covers a smaller range
|
||||||
* when the "execute a single insn" code performs the
|
* than a target page, so we must redo the MMU check every insn
|
||||||
* load of the guest insn.
|
* - TLB_MMIO: region is not backed by RAM
|
||||||
*/
|
*/
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
iotlbentry = &env->iotlb[mmu_idx][index];
|
|
||||||
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
|
|
||||||
mr = section->mr;
|
|
||||||
if (memory_region_is_unassigned(mr)) {
|
|
||||||
/*
|
|
||||||
* Not guest RAM, so there is no ram_addr_t for it. Return -1,
|
|
||||||
* and we will execute a single insn from this device.
|
|
||||||
*/
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
|
p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
|
||||||
return qemu_ram_addr_from_host_nofail(p);
|
return qemu_ram_addr_from_host_nofail(p);
|
||||||
}
|
}
|
||||||
|
|
6
exec.c
6
exec.c
|
@@ -402,12 +402,6 @@ static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool memory_region_is_unassigned(MemoryRegion *mr)
|
|
||||||
{
|
|
||||||
return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
|
|
||||||
&& mr != &io_mem_watch;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Called from RCU critical section */
|
/* Called from RCU critical section */
|
||||||
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
|
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
|
||||||
hwaddr addr,
|
hwaddr addr,
|
||||||
|
|
|
@@ -502,8 +502,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
|
||||||
hwaddr paddr, hwaddr xlat,
|
hwaddr paddr, hwaddr xlat,
|
||||||
int prot,
|
int prot,
|
||||||
target_ulong *address);
|
target_ulong *address);
|
||||||
bool memory_region_is_unassigned(MemoryRegion *mr);
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* vl.c */
|
/* vl.c */
|
||||||
|
|
Loading…
Reference in New Issue