mirror of https://github.com/xemu-project/xemu.git
target/arm: Implement MTE tag-checking functions for FEAT_MOPS copies
The FEAT_MOPS memory copy operations need an extra helper routine for checking for MTE tag check failures, beyond the ones we already added for the memory set operations: * mte_mops_probe_rev() does the same job as mte_mops_probe(), but it checks tags starting at the provided address and working backwards, rather than forwards. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-id: 20230912140434.1333369-11-peter.maydell@linaro.org
This commit is contained in:
parent
6087df5744
commit
69c51dc372
|
@ -1288,6 +1288,23 @@ uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
|
||||||
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
|
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
|
||||||
uint32_t desc);
|
uint32_t desc);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
|
||||||
|
* operation going in the reverse direction
|
||||||
|
* @env: CPU env
|
||||||
|
* @ptr: *end* address of memory region (dirty pointer)
|
||||||
|
* @size: length of region (guaranteed not to cross a page boundary)
|
||||||
|
* @desc: MTEDESC descriptor word (0 means no MTE checks)
|
||||||
|
* Returns: the size of the region that can be copied without hitting
|
||||||
|
* an MTE tag failure
|
||||||
|
*
|
||||||
|
* Note that we assume that the caller has already checked the TBI
|
||||||
|
* and TCMA bits with mte_checks_needed() and an MTE check is definitely
|
||||||
|
* required.
|
||||||
|
*/
|
||||||
|
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
|
||||||
|
uint32_t desc);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mte_check_fail: Record an MTE tag check failure
|
* mte_check_fail: Record an MTE tag check failure
|
||||||
* @env: CPU env
|
* @env: CPU env
|
||||||
|
|
|
@ -734,6 +734,55 @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
|
||||||
return n;
|
return n;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* checkNrev:
|
||||||
|
* @tag: tag memory to test
|
||||||
|
* @odd: true to begin testing at tags at odd nibble
|
||||||
|
* @cmp: the tag to compare against
|
||||||
|
* @count: number of tags to test
|
||||||
|
*
|
||||||
|
* Return the number of successful tests.
|
||||||
|
* Thus a return value < @count indicates a failure.
|
||||||
|
*
|
||||||
|
* This is like checkN, but it runs backwards, checking the
|
||||||
|
* tags starting with @tag and then the tags preceding it.
|
||||||
|
* This is needed by the backwards-memory-copying operations.
|
||||||
|
*/
|
||||||
|
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
|
||||||
|
{
|
||||||
|
int n = 0, diff;
|
||||||
|
|
||||||
|
/* Replicate the test tag and compare. */
|
||||||
|
cmp *= 0x11;
|
||||||
|
diff = *mem-- ^ cmp;
|
||||||
|
|
||||||
|
if (!odd) {
|
||||||
|
goto start_even;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
/* Test odd tag. */
|
||||||
|
if (unlikely((diff) & 0xf0)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (++n == count) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
start_even:
|
||||||
|
/* Test even tag. */
|
||||||
|
if (unlikely((diff) & 0x0f)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (++n == count) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
diff = *mem-- ^ cmp;
|
||||||
|
}
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mte_probe_int() - helper for mte_probe and mte_check
|
* mte_probe_int() - helper for mte_probe and mte_check
|
||||||
* @env: CPU environment
|
* @env: CPU environment
|
||||||
|
@ -1042,6 +1091,56 @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
|
||||||
|
uint32_t desc)
|
||||||
|
{
|
||||||
|
int mmu_idx, tag_count;
|
||||||
|
uint64_t ptr_tag, tag_first, tag_last;
|
||||||
|
void *mem;
|
||||||
|
bool w = FIELD_EX32(desc, MTEDESC, WRITE);
|
||||||
|
uint32_t n;
|
||||||
|
|
||||||
|
mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
|
||||||
|
/* True probe; this will never fault */
|
||||||
|
mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
|
||||||
|
w ? MMU_DATA_STORE : MMU_DATA_LOAD,
|
||||||
|
size, MMU_DATA_LOAD, true, 0);
|
||||||
|
if (!mem) {
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* TODO: checkNrev() is not designed for checks of the size we expect
|
||||||
|
* for FEAT_MOPS operations, so we should implement this differently.
|
||||||
|
* Maybe we should do something like
|
||||||
|
* if (region start and size are aligned nicely) {
|
||||||
|
* do direct loads of 64 tag bits at a time;
|
||||||
|
* } else {
|
||||||
|
* call checkN()
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
/* Round the bounds to the tag granule, and compute the number of tags. */
|
||||||
|
ptr_tag = allocation_tag_from_addr(ptr);
|
||||||
|
tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
|
||||||
|
tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
|
||||||
|
tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
|
||||||
|
n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
|
||||||
|
if (likely(n == tag_count)) {
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Failure; for the first granule, it's at @ptr. Otherwise
|
||||||
|
* it's at the last byte of the nth granule. Calculate how
|
||||||
|
* many bytes we can access without hitting that failure.
|
||||||
|
*/
|
||||||
|
if (n == 0) {
|
||||||
|
return 0;
|
||||||
|
} else {
|
||||||
|
return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
|
void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
|
||||||
uint32_t desc)
|
uint32_t desc)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue