accel/tcg: Merge io_writex into do_st_mmio_leN
Avoid multiple calls to io_prepare for unaligned accesses: one call to do_st_mmio_leN will never cross pages, so the address translation can be done once, up front.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 13e617475d
commit 5646d6a70f
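For intuition before reading the patch: a minimal, self-contained sketch (the piece_size helper and the sample address are invented for illustration; this is not QEMU code, and __builtin_ctz stands in for QEMU's ctz32) of how the new loop's ctz32(size | (int)addr | 8) picks the largest naturally aligned piece, capped at 8 bytes, on every iteration.

#include <stdio.h>
#include <stdint.h>

/* Largest power-of-two piece that is aligned at addr, no larger than
 * the remaining size, and capped at 8 bytes -- the same computation
 * as ctz32(size | (int)addr | 8) in the patch below. */
static unsigned piece_size(uint64_t addr, int size)
{
    return 1u << __builtin_ctz((unsigned)size | (unsigned)addr | 8);
}

int main(void)
{
    uint64_t addr = 0x1001;   /* odd start address, 7-byte store */
    int size = 7;

    /* Prints pieces of 1, 2 and 4 bytes: one dispatch per piece,
     * but only one io_prepare for the whole sequence. */
    while (size) {
        unsigned n = piece_size(addr, size);
        printf("%u byte(s) at 0x%llx\n", n, (unsigned long long)addr);
        addr += n;
        size -= n;
    }
    return 0;
}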
@@ -1388,30 +1388,6 @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
     }
 }
 
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
-                      int mmu_idx, uint64_t val, vaddr addr,
-                      uintptr_t retaddr, MemOp op)
-{
-    MemoryRegionSection *section;
-    hwaddr mr_offset;
-    MemoryRegion *mr;
-    MemTxResult r;
-
-    section = io_prepare(&mr_offset, env, full->xlat_section,
-                         full->attrs, addr, retaddr);
-    mr = section->mr;
-
-    {
-        QEMU_IOTHREAD_LOCK_GUARD();
-        r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
-    }
-
-    if (r != MEMTX_OK) {
-        io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
-                  r, retaddr);
-    }
-}
-
 /* Return true if ADDR is present in the victim tlb, and has been copied
    back to the main tlb.  */
 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
@@ -2682,39 +2658,41 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
                                uint64_t val_le, vaddr addr, int size,
                                int mmu_idx, uintptr_t ra)
 {
+    MemoryRegionSection *section;
+    hwaddr mr_offset;
+    MemoryRegion *mr;
+    MemTxAttrs attrs;
+
     tcg_debug_assert(size > 0 && size <= 8);
 
+    attrs = full->attrs;
+    section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+    mr = section->mr;
+
     do {
+        MemOp this_mop;
+        unsigned this_size;
+        MemTxResult r;
+
         /* Store aligned pieces up to 8 bytes. */
-        switch ((size | (int)addr) & 7) {
-        case 1:
-        case 3:
-        case 5:
-        case 7:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
-            val_le >>= 8;
-            size -= 1;
-            addr += 1;
-            break;
-        case 2:
-        case 6:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
-            val_le >>= 16;
-            size -= 2;
-            addr += 2;
-            break;
-        case 4:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
-            val_le >>= 32;
-            size -= 4;
-            addr += 4;
-            break;
-        case 0:
-            io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
-            return 0;
-        default:
-            qemu_build_not_reached();
-        }
+        this_mop = ctz32(size | (int)addr | 8);
+        this_size = 1 << this_mop;
+        this_mop |= MO_LE;
+
+        r = memory_region_dispatch_write(mr, mr_offset, val_le,
+                                         this_mop, attrs);
+        if (unlikely(r != MEMTX_OK)) {
+            io_failed(env, full, addr, this_size, MMU_DATA_STORE,
+                      mmu_idx, r, ra);
+        }
+        if (this_size == 8) {
+            return 0;
+        }
+
+        val_le >>= this_size * 8;
+        addr += this_size;
+        mr_offset += this_size;
+        size -= this_size;
     } while (size);
 
     return val_le;