tcg/aarch64: Rename temporaries

We will need to allocate a second general-purpose temporary.
Rename the existing temps to add a distinguishing number.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2023-04-17 15:33:17 +02:00
parent 098d0fc10d
commit d67bcbddce
1 changed file with 25 additions and 25 deletions

tcg/aarch64/tcg-target.c.inc

@@ -71,8 +71,8 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
     return TCG_REG_X0 + slot;
 }
 
-#define TCG_REG_TMP TCG_REG_X30
-#define TCG_VEC_TMP TCG_REG_V31
+#define TCG_REG_TMP0 TCG_REG_X30
+#define TCG_VEC_TMP0 TCG_REG_V31
 
 #ifndef CONFIG_SOFTMMU
 #define TCG_REG_GUEST_BASE TCG_REG_X28
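
The rename is purely mechanical: both temporaries keep their register assignments (X30 and V31), and only the names gain a trailing 0, leaving room for follow-up patches to define further numbered temporaries alongside. A hypothetical sketch of where this is headed (the TCG_REG_TMP1 name and its register choice are illustrative only, not part of this commit):

    #define TCG_REG_TMP0            TCG_REG_X30
    #define TCG_REG_TMP1            TCG_REG_X17   /* hypothetical second temp */
    #define TCG_VEC_TMP0            TCG_REG_V31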
@@ -984,7 +984,7 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                              TCGReg r, TCGReg base, intptr_t offset)
 {
-    TCGReg temp = TCG_REG_TMP;
+    TCGReg temp = TCG_REG_TMP0;
 
     if (offset < -0xffffff || offset > 0xffffff) {
         tcg_out_movi(s, TCG_TYPE_PTR, temp, offset);
@@ -1136,8 +1136,8 @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,
     }
 
     /* Worst-case scenario, move offset to temp register, use reg offset. */
-    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
-    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
+    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, offset);
+    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP0);
 }
 
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
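
For context on this hunk: tcg_out_ldst first tries the signed 9-bit unscaled immediate (LDUR/STUR) and the scaled unsigned 12-bit immediate addressing forms, and only falls back to materializing the offset in the temp register and using register-offset addressing when neither fits. A simplified sketch of the fit tests, assuming lgsize is log2 of the access size:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: must this displacement go through the temp register?
       Simplified from tcg_out_ldst's checks. */
    static bool needs_tmp_for_offset(int64_t ofs, int lgsize)
    {
        /* LDUR/STUR: signed 9-bit, unscaled. */
        bool fits_simm9 = ofs >= -256 && ofs < 256;
        /* LDR/STR: unsigned 12-bit, scaled by the access size. */
        bool fits_uimm12 = ofs >= 0
                           && (ofs & ((1 << lgsize) - 1)) == 0
                           && (ofs >> lgsize) < 4096;
        return !fits_simm9 && !fits_uimm12;
    }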
@@ -1353,8 +1353,8 @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target)
     if (offset == sextract64(offset, 0, 26)) {
         tcg_out_insn(s, 3206, BL, offset);
     } else {
-        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
-        tcg_out_insn(s, 3207, BLR, TCG_REG_TMP);
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
+        tcg_out_insn(s, 3207, BLR, TCG_REG_TMP0);
     }
 }
 
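A note on the range test: BL carries a 26-bit signed offset in 4-byte units, a reach of about ±128 MiB, so the call is direct when the displacement survives a sign-extension round trip through 26 bits; otherwise the full target address is built in the temp and called via BLR. A standalone sketch of the check (QEMU's own sextract64 lives in include/qemu/bitops.h; this local copy is for illustration):

    #include <stdint.h>

    /* Sign-extract the low 'length' bits of 'value'. */
    static int64_t sextract64_sketch(int64_t value, int length)
    {
        return (int64_t)((uint64_t)value << (64 - length)) >> (64 - length);
    }

    static int fits_bl(int64_t offset)   /* offset in 4-byte units */
    {
        return offset == sextract64_sketch(offset, 26);
    }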
@@ -1491,7 +1491,7 @@ static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
     AArch64Insn insn;
 
     if (rl == ah || (!const_bh && rl == bh)) {
-        rl = TCG_REG_TMP;
+        rl = TCG_REG_TMP0;
     }
 
     if (const_bl) {
@@ -1508,7 +1508,7 @@ static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
            possibility of adding 0+const in the low part, and the
            immediate add instructions encode XSP not XZR.  Don't try
            anything more elaborate here than loading another zero. */
-        al = TCG_REG_TMP;
+        al = TCG_REG_TMP0;
         tcg_out_movi(s, ext, al, 0);
     }
     tcg_out_insn_3401(s, insn, ext, rl, al, bl);
@@ -1549,7 +1549,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
 {
     TCGReg a1 = a0;
     if (is_ctz) {
-        a1 = TCG_REG_TMP;
+        a1 = TCG_REG_TMP0;
         tcg_out_insn(s, 3507, RBIT, ext, a1, a0);
     }
     if (const_b && b == (ext ? 64 : 32)) {
@@ -1558,7 +1558,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
         AArch64Insn sel = I3506_CSEL;
 
         tcg_out_cmp(s, ext, a0, 0, 1);
-        tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1);
+        tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1);
 
         if (const_b) {
             if (b == -1) {
@@ -1571,7 +1571,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
                 b = d;
             }
         }
-        tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE);
+        tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP0, b, TCG_COND_NE);
     }
 }
 
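The is_ctz path leans on the identity ctz(x) == clz(bit_reverse(x)): RBIT writes the reversed value into the temp, CLZ counts from there, and the compare/CSEL pair substitutes b, the TCG op's explicit fallback argument, when the input is zero. A quick C check of the identity, using compiler builtins that are undefined for x == 0:

    #include <assert.h>
    #include <stdint.h>

    /* Bit-reverse a 64-bit value, as AArch64 RBIT does. */
    static uint64_t rbit64(uint64_t x)
    {
        uint64_t r = 0;
        for (int i = 0; i < 64; i++) {
            r = (r << 1) | ((x >> i) & 1);
        }
        return r;
    }

    int main(void)
    {
        uint64_t x = 0xf0;   /* lowest set bit is bit 4 */
        assert(__builtin_ctzll(x) == __builtin_clzll(rbit64(x)));
        return 0;
    }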
@@ -1588,7 +1588,7 @@ bool tcg_target_has_memory_bswap(MemOp memop)
 }
 
 static const TCGLdstHelperParam ldst_helper_param = {
-    .ntmp = 1, .tmp = { TCG_REG_TMP }
+    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
 };
 
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -1847,7 +1847,7 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 
     set_jmp_insn_offset(s, which);
     tcg_out32(s, I3206_B);
-    tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
+    tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
     set_jmp_reset_offset(s, which);
 }
@@ -1866,7 +1866,7 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
         ptrdiff_t i_offset = i_addr - jmp_rx;
 
         /* Note that we asserted this in range in tcg_out_goto_tb. */
-        insn = deposit32(I3305_LDR | TCG_REG_TMP, 5, 19, i_offset >> 2);
+        insn = deposit32(I3305_LDR | TCG_REG_TMP0, 5, 19, i_offset >> 2);
     }
     qatomic_set((uint32_t *)jmp_rw, insn);
     flush_idcache_range(jmp_rx, jmp_rw, 4);
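Here deposit32 rewrites only the imm19 field of an LDR (literal) instruction, bits [5, 24), which holds the PC-relative load offset in 4-byte units (hence i_offset >> 2), while bits [0, 5) keep naming the destination register, now TMP0. QEMU's deposit32 is in include/qemu/bitops.h; a sketch of its field-insertion behavior:

    #include <stdint.h>

    /* Sketch: insert 'fieldval' into bits [start, start + length) of 'value'. */
    static uint32_t deposit32_sketch(uint32_t value, int start, int length,
                                     uint32_t fieldval)
    {
        uint32_t mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }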
@@ -2060,13 +2060,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_rem_i64:
     case INDEX_op_rem_i32:
-        tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2);
-        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
+        tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP0, a1, a2);
+        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1);
         break;
     case INDEX_op_remu_i64:
     case INDEX_op_remu_i32:
-        tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2);
-        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
+        tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP0, a1, a2);
+        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1);
         break;
 
     case INDEX_op_shl_i64:
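AArch64 has divide instructions but no remainder, so rem is lowered as a division into the temp followed by a multiply-subtract: MSUB a0, TMP0, a2, a1 computes a0 = a1 - TMP0 * a2. Since SDIV truncates toward zero, exactly like C's /, the pair yields C's % semantics. The same shape in C:

    #include <stdint.h>

    /* Two-instruction lowering of rem_i64: quotient into the temp,
       then multiply-subtract to recover the remainder. */
    static int64_t rem_via_msub(int64_t a1, int64_t a2)
    {
        int64_t tmp0 = a1 / a2;    /* SDIV tmp0, a1, a2 */
        return a1 - tmp0 * a2;     /* MSUB a0, tmp0, a2, a1 */
    }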
@@ -2110,8 +2110,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_rotl(s, ext, a0, a1, a2);
         } else {
-            tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
-            tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP);
+            tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP0, TCG_REG_XZR, a2);
+            tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP0);
         }
         break;
 
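There is no variable rotate-left on AArch64, only RORV, so rotl by a register amount is emitted as a negation into the temp (SUB from XZR) followed by a rotate-right: rotl(x, n) == rotr(x, -n), because RORV uses only the low bits of the count. A C check of that identity:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t rotr64(uint64_t x, unsigned n)
    {
        n &= 63;   /* RORV likewise masks the count to the operand width */
        return n ? (x >> n) | (x << (64 - n)) : x;
    }

    int main(void)
    {
        uint64_t x = 0x123456789abcdef0ull;
        unsigned n = 13;
        uint64_t rotl = (x << n) | (x >> (64 - n));
        /* SUB tmp0, xzr, a2; RORV a0, a1, tmp0 */
        assert(rotr64(x, -n) == rotl);
        return 0;
    }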
@@ -2517,8 +2517,8 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                         break;
                     }
                 }
-                tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
-                a2 = TCG_VEC_TMP;
+                tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP0, 0);
+                a2 = TCG_VEC_TMP0;
                 }
                 if (is_scalar) {
                     insn = cmp_scalar_insn[cond];
@@ -2900,9 +2900,9 @@ static void tcg_target_init(TCGContext *s)
     s->reserved_regs = 0;
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */
-    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
+    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
 }
 
 /* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */