accel/tcg: Split out io_prepare and io_failed
These helpers factor out code that was common to io_readx and io_writex.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent da6aef48d9
commit fb3cb376e9
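For readers outside the QEMU tree, the shape of the refactor in the diff below can be illustrated with a toy standalone C program: the duplicated lookup/offset prologue and the transaction-failure epilogue of a read and a write path move into two shared helpers. All names and types here (Section, io_read, io_write, the fake backing store) are simplified stand-ins for illustration only, not QEMU's real APIs.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;
typedef int MemTxResult;
#define MEMTX_OK 0

typedef struct {
    hwaddr base;            /* plays the role of offset_within_address_space */
    uint64_t backing[16];   /* fake MMIO backing store */
} Section;

static Section g_section = { .base = 0x1000 };

/* Common prologue (cf. io_prepare): resolve the section and the
 * offset into it, returning the offset through an out-parameter. */
static Section *io_prepare(hwaddr *out_offset, hwaddr addr)
{
    *out_offset = addr & 0x7f;      /* page-offset math, much simplified */
    return &g_section;
}

/* Common epilogue (cf. io_failed): compute the physical address and
 * report the failed transaction in exactly one place. */
static void io_failed(Section *section, hwaddr addr, hwaddr mr_offset,
                      MemTxResult response)
{
    hwaddr physaddr = section->base + mr_offset;
    fprintf(stderr, "I/O at 0x%llx (phys 0x%llx) failed: %d\n",
            (unsigned long long)addr, (unsigned long long)physaddr, response);
}

static uint64_t io_read(hwaddr addr)
{
    hwaddr mr_offset;
    Section *section = io_prepare(&mr_offset, addr);
    MemTxResult r = MEMTX_OK;       /* a real dispatch would set this */
    uint64_t val = section->backing[mr_offset / 8];

    if (r != MEMTX_OK) {
        io_failed(section, addr, mr_offset, r);
    }
    return val;
}

static void io_write(hwaddr addr, uint64_t val)
{
    hwaddr mr_offset;
    Section *section = io_prepare(&mr_offset, addr);
    MemTxResult r = MEMTX_OK;

    section->backing[mr_offset / 8] = val;
    if (r != MEMTX_OK) {
        io_failed(section, addr, mr_offset, r);
    }
}

int main(void)
{
    io_write(0x1010, 42);
    printf("read back: %llu\n", (unsigned long long)io_read(0x1010));
    return 0;
}

As in the commit itself, the helper returns the resolved section and passes the offset back through an out-parameter, so both callers keep a single error path.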
accel/tcg/cputlb.c

@@ -1267,7 +1267,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * (non-page-aligned) vaddr of the eventual memory access to get
      * the MemoryRegion offset for the access. Note that the vaddr we
      * subtract here is that of the page base, and not the same as the
-     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
+     * vaddr we add back in io_prepare()/get_page_addr_code().
      */
     desc->fulltlb[index] = *full;
     full = &desc->fulltlb[index];
@@ -1367,37 +1367,60 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }

-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
-                         int mmu_idx, vaddr addr, uintptr_t retaddr,
-                         MMUAccessType access_type, MemOp op)
+static MemoryRegionSection *
+io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+           MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
 {
     CPUState *cpu = env_cpu(env);
-    hwaddr mr_offset;
     MemoryRegionSection *section;
-    MemoryRegion *mr;
-    uint64_t val;
-    MemTxResult r;
+    hwaddr mr_offset;

-    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
-    mr = section->mr;
-    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
+    section = iotlb_to_section(cpu, xlat, attrs);
+    mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }

+    *out_offset = mr_offset;
+    return section;
+}
+
+static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+                      unsigned size, MMUAccessType access_type, int mmu_idx,
+                      MemTxResult response, uintptr_t retaddr,
+                      MemoryRegionSection *section, hwaddr mr_offset)
+{
+    hwaddr physaddr = (mr_offset +
+                       section->offset_within_address_space -
+                       section->offset_within_region);
+
+    cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
+                           mmu_idx, full->attrs, response, retaddr);
+}
+
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
+                         int mmu_idx, vaddr addr, uintptr_t retaddr,
+                         MMUAccessType access_type, MemOp op)
+{
+    MemoryRegionSection *section;
+    hwaddr mr_offset;
+    MemoryRegion *mr;
+    MemTxResult r;
+    uint64_t val;
+
+    section = io_prepare(&mr_offset, env, full->xlat_section,
+                         full->attrs, addr, retaddr);
+    mr = section->mr;
+
     {
         QEMU_IOTHREAD_LOCK_GUARD();
         r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
     }

     if (r != MEMTX_OK) {
-        hwaddr physaddr = mr_offset +
-            section->offset_within_address_space -
-            section->offset_within_region;
-
-        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
-                               mmu_idx, full->attrs, r, retaddr);
+        io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
+                  r, retaddr, section, mr_offset);
     }
     return val;
 }
@@ -1406,19 +1429,14 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                       int mmu_idx, uint64_t val, vaddr addr,
                       uintptr_t retaddr, MemOp op)
 {
-    CPUState *cpu = env_cpu(env);
-    hwaddr mr_offset;
     MemoryRegionSection *section;
+    hwaddr mr_offset;
     MemoryRegion *mr;
     MemTxResult r;

-    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
+    section = io_prepare(&mr_offset, env, full->xlat_section,
+                         full->attrs, addr, retaddr);
     mr = section->mr;
-    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
-    if (!cpu->can_do_io) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-    cpu->mem_io_pc = retaddr;

     {
         QEMU_IOTHREAD_LOCK_GUARD();
@@ -1426,13 +1444,8 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
     }

     if (r != MEMTX_OK) {
-        hwaddr physaddr = mr_offset +
-            section->offset_within_address_space -
-            section->offset_within_region;
-
-        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
-                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
-                               retaddr);
+        io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
+                  r, retaddr, section, mr_offset);
     }
 }