tcg/ppc: Support tlb offsets larger than 64k

AArch64 with SVE has an offset of 80k to the 8th TLB.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2018-01-10 07:47:25 +00:00
parent 71f9cee9d0
commit 4a64e0fd68
1 changed file with 8 additions and 9 deletions

View File

@@ -1524,16 +1524,15 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
     /* Compensate for very large offsets.  */
     if (add_off >= 0x8000) {
-        /* Most target env are smaller than 32k; none are larger than 64k.
-           Simplify the logic here merely to offset by 0x7ff0, giving us a
-           range just shy of 64k.  Check this assumption.  */
-        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
-                                   tlb_table[NB_MMU_MODES - 1][1])
-                          > 0x7ff0 + 0x7fff);
-        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, base, 0x7ff0));
+        int low = (int16_t)cmp_off;
+        int high = cmp_off - low;
+        assert((high & 0xffff) == 0);
+        assert(cmp_off - high == (int16_t)(cmp_off - high));
+        assert(add_off - high == (int16_t)(add_off - high));
+        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, base, high >> 16));
         base = TCG_REG_TMP1;
-        cmp_off -= 0x7ff0;
-        add_off -= 0x7ff0;
+        cmp_off -= high;
+        add_off -= high;
     }
     /* Extraction and shifting, part 2.  */