mirror of https://github.com/xemu-project/xemu.git
For the FEAT_MOPS operations, the existing allocation_tag_mem() function almost does what we want, but it will take a watchpoint exception even for an ra == 0 probe request, and it requires that the caller guarantee that the memory is accessible. For FEAT_MOPS we want a function that will not take any kind of exception, and will return NULL for the not-accessible case. Rename allocation_tag_mem() to allocation_tag_mem_probe() and add an extra 'probe' argument that lets us distinguish these cases; allocation_tag_mem() is now a wrapper that always passes 'false'. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20230912140434.1333369-6-peter.maydell@linaro.org
parent 31aaaddecb
commit aa03378bcc
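A minimal sketch, not part of the commit, of the pure-probe mode the message describes. The surrounding variables (env, mmu_idx, ptr, size) are illustrative placeholders; only allocation_tag_mem_probe() and its argument order come from this patch:

    /* Hypothetical FEAT_MOPS-style caller: probe = true with ra = 0 never
     * raises an exception and never fires watchpoints; NULL means the page
     * is not accessible (or has no tag storage).
     */
    uint8_t *tag_mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                                MMU_DATA_STORE, size,
                                                MMU_DATA_STORE,
                                                true /* probe */, 0 /* ra */);
    if (!tag_mem) {
        /* The caller ranks this case against MTE traps before faulting. */
    }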
@@ -50,13 +50,14 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
 }
 
 /**
- * allocation_tag_mem:
+ * allocation_tag_mem_probe:
  * @env: the cpu environment
  * @ptr_mmu_idx: the addressing regime to use for the virtual address
  * @ptr: the virtual address for which to look up tag memory
  * @ptr_access: the access to use for the virtual address
  * @ptr_size: the number of bytes in the normal memory access
  * @tag_access: the access to use for the tag memory
+ * @probe: true to merely probe, never taking an exception
  * @ra: the return address for exception handling
  *
  * Our tag memory is formatted as a sequence of little-endian nibbles.
@@ -65,15 +66,25 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
  * for the higher addr.
  *
  * Here, resolve the physical address from the virtual address, and return
- * a pointer to the corresponding tag byte. Exit with exception if the
- * virtual address is not accessible for @ptr_access.
+ * a pointer to the corresponding tag byte.
  *
  * If there is no tag storage corresponding to @ptr, return NULL.
+ *
+ * If the page is inaccessible for @ptr_access, or has a watchpoint, there are
+ * three options:
+ * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not
+ *     accessible, and do not take watchpoint traps. The calling code must
+ *     handle those cases in the right priority compared to MTE traps.
+ * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees
+ *     that the page is going to be accessible. We will take watchpoint traps.
+ * (3) probe = false, ra != 0 : non-probe -- we will take both memory access
+ *     traps and watchpoint traps.
+ * (probe = true, ra != 0 is invalid and will assert.)
  */
-static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
-                                   uint64_t ptr, MMUAccessType ptr_access,
-                                   int ptr_size, MMUAccessType tag_access,
-                                   uintptr_t ra)
+static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
+                                         uint64_t ptr, MMUAccessType ptr_access,
+                                         int ptr_size, MMUAccessType tag_access,
+                                         bool probe, uintptr_t ra)
 {
 #ifdef CONFIG_USER_ONLY
     uint64_t clean_ptr = useronly_clean_ptr(ptr);
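The three-option contract from the doc comment above, restated as call sketches; mmu_idx, acc, tag_acc, and size are illustrative placeholders, and GETPC() is QEMU's usual macro for capturing the host return address in a helper:

    /* (1) pure probe: never faults, never fires watchpoints, NULL on failure */
    allocation_tag_mem_probe(env, mmu_idx, ptr, acc, size, tag_acc, true, 0);

    /* (2) no-fault-expected probe: the caller guarantees the page is
     *     accessible, but watchpoint traps are still taken
     */
    allocation_tag_mem_probe(env, mmu_idx, ptr, acc, size, tag_acc, false, 0);

    /* (3) normal access: memory-access traps and watchpoint traps both taken */
    allocation_tag_mem_probe(env, mmu_idx, ptr, acc, size, tag_acc, false,
                             GETPC());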
@@ -81,6 +92,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     uint8_t *tags;
     uintptr_t index;
 
+    assert(!(probe && ra));
+
     if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
         cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                               !(flags & PAGE_VALID), ra);
@@ -111,12 +124,16 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      * exception for inaccessible pages, and resolves the virtual address
      * into the softmmu tlb.
      *
-     * When RA == 0, this is for mte_probe. The page is expected to be
-     * valid. Indicate to probe_access_flags no-fault, then assert that
-     * we received a valid page.
+     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
+     * Indicate to probe_access_flags no-fault, then either return NULL
+     * for the pure probe, or assert that we received a valid page for the
+     * no-fault-expected probe.
      */
     flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, &full, ra);
+    if (probe && (flags & TLB_INVALID_MASK)) {
+        return NULL;
+    }
     assert(!(flags & TLB_INVALID_MASK));
 
     /* If the virtual page MemAttr != Tagged, access unchecked. */
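In the softmmu path above, the pure probe works because probe_access_full() is called with nonfault = (ra == 0): an inaccessible page then comes back as TLB_INVALID_MASK in the returned flags instead of raising an exception. A condensed sketch of that pattern, mirroring the hunk rather than adding to it:

    /* nonfault probe: fill the TLB if possible, but never trap */
    void *host;
    CPUTLBEntryFull *full;
    int flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                                  true /* nonfault */, &host, &full, 0);
    if (flags & TLB_INVALID_MASK) {
        return NULL;    /* option (1): page not accessible */
    }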
@@ -157,7 +174,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     }
 
     /* Any debug exception has priority over a tag check exception. */
-    if (unlikely(flags & TLB_WATCHPOINT)) {
+    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
         assert(ra != 0);
         cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
@@ -199,6 +216,15 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
 #endif
 }
 
+static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
+                                   uint64_t ptr, MMUAccessType ptr_access,
+                                   int ptr_size, MMUAccessType tag_access,
+                                   uintptr_t ra)
+{
+    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
+                                    ptr_size, tag_access, false, ra);
+}
+
 uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
 {
     uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
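The wrapper above keeps the old entry point source-compatible: existing callers of allocation_tag_mem() compile unchanged and keep their previous behaviour, since probe is hard-wired to false. A hypothetical pre-existing call site, with illustrative argument values:

    /* Behaves exactly as before this patch: with probe = false, watchpoint
     * and memory-access traps are still taken as appropriate.
     */
    uint8_t *mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                      size, MMU_DATA_LOAD, GETPC());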