mirror of https://github.com/xemu-project/xemu.git
target/arm: Use set/clear_helper_retaddr in SVE and SME helpers
Avoid a race condition with munmap in another thread.  Use around
blocks that exclusively use "host_fn".  Keep the blocks as small as
possible, but without setting and clearing for every operation on
one page.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 3b9991e35c (parent 8009519b30)
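For orientation before the hunks: the change brackets the blocks of direct host-memory accesses (the "host_fn" loops) in the SME and SVE contiguous load/store helpers with set_helper_retaddr()/clear_helper_retaddr(). While that window is open, a SEGV taken on a host access is attributed to the guest page having been unmapped by a concurrent munmap; outside it, a SEGV is treated as a QEMU bug. Below is a minimal sketch of the shape, for illustration only: guarded_host_loop() is an invented name, the loop body is simplified, and the host_fn typedef follows the SVE helpers.

    static void guarded_host_loop(void *vd, uint64_t *vg, char *host,
                                  intptr_t reg_off, intptr_t reg_last,
                                  intptr_t esize, uintptr_t ra,
                                  sve_ldst1_host_fn *host_fn)
    {
        /* From here on, a SEGV is assumed to mean the guest page vanished. */
        set_helper_retaddr(ra);

        while (reg_off <= reg_last) {
            uint64_t pg = vg[reg_off >> 6];
            do {
                if ((pg >> (reg_off & 63)) & 1) {
                    host_fn(vd, reg_off, host + reg_off);
                }
                reg_off += esize;
            } while (reg_off <= reg_last && (reg_off & 63));
        }

        /* Close the window as soon as the direct host accesses are done. */
        clear_helper_retaddr();
    }

The window is opened once per per-page loop rather than once per element, which keeps the set/clear overhead off the per-element path while still keeping the guarded region small.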
@@ -517,6 +517,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
         clr_fn(za, 0, reg_off);
     }
 
+    set_helper_retaddr(ra);
+
     while (reg_off <= reg_last) {
         uint64_t pg = vg[reg_off >> 6];
         do {
@@ -529,6 +531,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
         } while (reg_off <= reg_last && (reg_off & 63));
     }
 
+    clear_helper_retaddr();
+
     /*
      * Use the slow path to manage the cross-page misalignment.
      * But we know this is RAM and cannot trap.
@@ -543,6 +547,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
         reg_last = info.reg_off_last[1];
         host = info.page[1].host;
 
+        set_helper_retaddr(ra);
+
         do {
             uint64_t pg = vg[reg_off >> 6];
             do {
@@ -554,6 +560,8 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
                 reg_off += esize;
             } while (reg_off & 63);
         } while (reg_off <= reg_last);
+
+        clear_helper_retaddr();
     }
 }
 
@@ -701,6 +709,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
     reg_last = info.reg_off_last[0];
     host = info.page[0].host;
 
+    set_helper_retaddr(ra);
+
     while (reg_off <= reg_last) {
         uint64_t pg = vg[reg_off >> 6];
         do {
@@ -711,6 +721,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
         } while (reg_off <= reg_last && (reg_off & 63));
     }
 
+    clear_helper_retaddr();
+
     /*
      * Use the slow path to manage the cross-page misalignment.
      * But we know this is RAM and cannot trap.
@@ -725,6 +737,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
         reg_last = info.reg_off_last[1];
         host = info.page[1].host;
 
+        set_helper_retaddr(ra);
+
         do {
             uint64_t pg = vg[reg_off >> 6];
             do {
@@ -734,6 +748,8 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
                 reg_off += 1 << esz;
             } while (reg_off & 63);
         } while (reg_off <= reg_last);
+
+        clear_helper_retaddr();
     }
 }
 
@@ -5738,6 +5738,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
     reg_last = info.reg_off_last[0];
     host = info.page[0].host;
 
+    set_helper_retaddr(retaddr);
+
     while (reg_off <= reg_last) {
         uint64_t pg = vg[reg_off >> 6];
         do {
@@ -5752,6 +5754,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
         } while (reg_off <= reg_last && (reg_off & 63));
     }
 
+    clear_helper_retaddr();
+
     /*
      * Use the slow path to manage the cross-page misalignment.
      * But we know this is RAM and cannot trap.
@@ -5771,6 +5775,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
         reg_last = info.reg_off_last[1];
         host = info.page[1].host;
 
+        set_helper_retaddr(retaddr);
+
         do {
             uint64_t pg = vg[reg_off >> 6];
             do {
@@ -5784,6 +5790,8 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
                 mem_off += N << msz;
             } while (reg_off & 63);
         } while (reg_off <= reg_last);
+
+        clear_helper_retaddr();
     }
 }
 
@@ -5934,15 +5942,11 @@ DO_LDN_2(4, dd, MO_64)
 /*
  * Load contiguous data, first-fault and no-fault.
  *
- * For user-only, one could argue that we should hold the mmap_lock during
- * the operation so that there is no race between page_check_range and the
- * load operation. However, unmapping pages out from under a running thread
- * is extraordinarily unlikely. This theoretical race condition also affects
- * linux-user/ in its get_user/put_user macros.
- *
- * TODO: Construct some helpers, written in assembly, that interact with
- * host_signal_handler to produce memory ops which can properly report errors
- * without racing.
+ * For user-only, we control the race between page_check_range and
+ * another thread's munmap by using set/clear_helper_retaddr. Any
+ * SEGV that occurs between those markers is assumed to be because
+ * the guest page vanished. Keep that block as small as possible
+ * so that unrelated QEMU bugs are not blamed on the guest.
  */
 
 /* Fault on byte I. All bits in FFR from I are cleared. The vector
@@ -6093,6 +6097,8 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
     reg_last = info.reg_off_last[0];
     host = info.page[0].host;
 
+    set_helper_retaddr(retaddr);
+
     do {
         uint64_t pg = *(uint64_t *)(vg + (reg_off >> 3));
         do {
@@ -6101,9 +6107,11 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
                     (cpu_watchpoint_address_matches
                      (env_cpu(env), addr + mem_off, 1 << msz)
                      & BP_MEM_READ)) {
+                    clear_helper_retaddr();
                     goto do_fault;
                 }
                 if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
+                    clear_helper_retaddr();
                     goto do_fault;
                 }
                 host_fn(vd, reg_off, host + mem_off);
@@ -6113,6 +6121,8 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
         } while (reg_off <= reg_last && (reg_off & 63));
     } while (reg_off <= reg_last);
 
+    clear_helper_retaddr();
+
     /*
      * MemSingleNF is allowed to fail for any reason. We have special
      * code above to handle the first element crossing a page boundary.
@@ -6348,6 +6358,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
     reg_last = info.reg_off_last[0];
     host = info.page[0].host;
 
+    set_helper_retaddr(retaddr);
+
     while (reg_off <= reg_last) {
         uint64_t pg = vg[reg_off >> 6];
         do {
@@ -6362,6 +6374,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
         } while (reg_off <= reg_last && (reg_off & 63));
     }
 
+    clear_helper_retaddr();
+
     /*
      * Use the slow path to manage the cross-page misalignment.
      * But we know this is RAM and cannot trap.
@@ -6381,6 +6395,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
         reg_last = info.reg_off_last[1];
         host = info.page[1].host;
 
+        set_helper_retaddr(retaddr);
+
         do {
             uint64_t pg = vg[reg_off >> 6];
             do {
@@ -6394,6 +6410,8 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
                 mem_off += N << msz;
             } while (reg_off & 63);
         } while (reg_off <= reg_last);
+
+        clear_helper_retaddr();
     }
 }
 
@@ -6560,7 +6578,9 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 if (unlikely(info.flags & TLB_MMIO)) {
                     tlb_fn(env, &scratch, reg_off, addr, retaddr);
                 } else {
+                    set_helper_retaddr(retaddr);
                     host_fn(&scratch, reg_off, info.host);
+                    clear_helper_retaddr();
                 }
             } else {
                 /* Element crosses the page boundary. */
@@ -6782,7 +6802,9 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                     goto fault;
                 }
 
+                set_helper_retaddr(retaddr);
                 host_fn(vd, reg_off, info.host);
+                clear_helper_retaddr();
             }
             reg_off += esize;
         } while (reg_off & 63);
@@ -6986,7 +7008,9 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
         do {
             void *h = host[i];
             if (likely(h != NULL)) {
+                set_helper_retaddr(retaddr);
                 host_fn(vd, reg_off, h);
+                clear_helper_retaddr();
             } else if ((vg[reg_off >> 6] >> (reg_off & 63)) & 1) {
                 target_ulong addr = base + (off_fn(vm, reg_off) << scale);
                 tlb_fn(env, vd, reg_off, addr, retaddr);
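One subtlety visible in the sve_ldnfff1_r hunks above: an early exit out of a guarded block has to close the window itself before branching away, otherwise a later, unrelated fault could still be blamed on a vanished guest page. The sketch below illustrates that shape only; guarded_first_fault_loop() and probe_ok() are invented names standing in for the real helper and its watchpoint/mte_probe checks.

    static void guarded_first_fault_loop(void *vd, char *host,
                                         intptr_t reg_off, intptr_t reg_last,
                                         intptr_t esize, uintptr_t retaddr,
                                         bool (*probe_ok)(intptr_t reg_off),
                                         sve_ldst1_host_fn *host_fn)
    {
        set_helper_retaddr(retaddr);
        do {
            if (!probe_ok(reg_off)) {
                /* Leaving the guarded block early: close the window first. */
                clear_helper_retaddr();
                goto do_fault;
            }
            host_fn(vd, reg_off, host + reg_off);
            reg_off += esize;
        } while (reg_off <= reg_last);
        clear_helper_retaddr();
        return;

    do_fault:
        /* The real helper records the faulting element in FFR here. */
        ;
    }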