tcg: Introduce set_jmp_insn_offset

Similar to the existing set_jmp_reset_offset.  Move any assert for
TCG_TARGET_HAS_direct_jump into the new function (which now cannot
be build-time).  Will be unused if TCG_TARGET_HAS_direct_jump is
constant 0, but we can't test for constant in the preprocessor,
so just mark it G_GNUC_UNUSED.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson, 2022-11-26 15:18:44 -08:00
parent 7f83167c61
commit b52a2c03b7
7 changed files with 19 additions and 14 deletions
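Background for the G_GNUC_UNUSED marking described above: a backend that defines TCG_TARGET_HAS_direct_jump as constant 0 never calls the new helper, yet the helper cannot be compiled out with #if because, as the commit message notes, the value is not guaranteed to be a preprocessor constant. A minimal standalone sketch of the pattern follows; TCGContext and tcg_current_code_size here are simplified stand-ins, not the real QEMU definitions.

#include <assert.h>
#include <stdint.h>

#define G_GNUC_UNUSED __attribute__((unused))  /* as in glib's gmacros.h */
#define TCG_TARGET_HAS_direct_jump 0           /* hypothetical backend */

/* Simplified stand-ins for the real QEMU declarations. */
typedef struct TCGContext {
    uint8_t *code_buf;
    uint8_t *code_ptr;
    uint16_t tb_jmp_insn_offset[2];
} TCGContext;

static int tcg_current_code_size(TCGContext *s)
{
    return s->code_ptr - s->code_buf;
}

/*
 * No call site survives when TCG_TARGET_HAS_direct_jump is 0, and the
 * function cannot be hidden behind #if, so without the unused attribute
 * -Wunused-function (with -Werror) would break the build.
 */
static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    assert(TCG_TARGET_HAS_direct_jump);  /* runtime, not build-time */
    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
}

int main(void)
{
    return 0;  /* the helper intentionally has no caller here */
}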

tcg/aarch64/tcg-target.c.inc

@@ -1918,7 +1918,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
          * write can be used to patch the target address.
          */
@@ -1926,7 +1925,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later

tcg/i386/tcg-target.c.inc

@@ -2383,7 +2383,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         {
             /*
              * Jump displacement must be aligned for atomic patching;
              * see if we need to add extra nops before jump
@@ -2394,7 +2393,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 tcg_out_nopn(s, gap - 1);
             }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, 0);
         }
         set_jmp_reset_offset(s, a0);

tcg/loongarch64/tcg-target.c.inc

@@ -1090,7 +1090,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that patch area is 8-byte aligned so that an
          * atomic write can be used to patch the target address.
          */
@@ -1098,7 +1097,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out_nop(s);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later

tcg/ppc/tcg-target.c.inc

@@ -2630,20 +2630,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (TCG_TARGET_REG_BITS == 64) {
             /* Ensure the next insns are 8 or 16-byte aligned. */
             while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
                 tcg_out32(s, NOP);
             }
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
             tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
         } else {
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, B);
-            s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_reset_offset(s, args[0]);
             break;
         }
         tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
         tcg_out32(s, BCCTR | BO_ALWAYS);

tcg/s390x/tcg-target.c.inc

@@ -1977,7 +1977,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out16(s, NOP);
         }
         tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         s->code_ptr += 2;
         set_jmp_reset_offset(s, a0);
         break;

tcg/sparc64/tcg-target.c.inc

@@ -1452,20 +1452,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (USE_REG_TB) {
             /* make sure the patch is 8-byte aligned. */
             if ((intptr_t)s->code_ptr & 4) {
                 tcg_out_nop(s);
             }
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out_sethi(s, TCG_REG_T1, 0);
             tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
             tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
             tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
         } else {
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, CALL);
             tcg_out_nop(s);
         }
         set_jmp_reset_offset(s, a0);

tcg/tcg.c

@@ -313,6 +313,16 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
     s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
+{
+    /*
+     * We will check for overflow at the end of the opcode loop in
+     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
+     */
+    tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
+    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
+}
+
 /* Signal overflow, starting over with fewer guest insns. */
 static G_NORETURN
 void tcg_raise_tb_overflow(TCGContext *s)
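The comment inside the new helper points at where overflow is actually handled: both tb_jmp_insn_offset and tb_jmp_reset_offset hold 16-bit values, and tcg_gen_code checks after each opcode that the running code size still fits, restarting translation with fewer guest instructions otherwise. The check in tcg.c is along these lines (paraphrased, not part of this commit):

    /* Test for TB overflow, as seen by the 16-bit offset fields. */
    if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
        return -2;  /* caller retries the TB with fewer guest insns */
    }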