tcg/arm: remove conditional argument for qemu_ld/st

While it makes sense to pass a conditional argument to the tcg_out_*()
functions, as the ARM architecture allows that, it doesn't make sense
for the qemu_ld/st functions. These functions already use comparison
instructions and conditional execution internally, so a second level of
conditional execution cannot be applied on top.
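
A rough sketch, for illustration only (it is not part of this patch, and the
helper name arm_dp_insn is made up): every ARM instruction carries exactly one
4-bit condition field in bits [31:28], the same field that the "(cond << 28)"
in the hunks below fills in. Once the qemu_ld/st fast path has used that field
to predicate the TLB-hit access and the slow-path helper call, there is no way
to encode an additional, caller-supplied condition in the same instructions.

    /* Illustrative sketch only, not QEMU code: a simplified encoder for an
     * ARM data-processing instruction (register form, I/S bits omitted).
     * Bits [31:28] hold the one and only condition field, so a TLB-check
     * condition and a caller-provided condition cannot both be expressed
     * in a single instruction. */
    #include <stdint.h>

    enum { COND_EQ = 0x0, COND_NE = 0x1, COND_AL = 0xe };

    static uint32_t arm_dp_insn(uint32_t cond, uint32_t opcode,
                                uint32_t rd, uint32_t rn, uint32_t rm)
    {
        return (cond << 28)      /* the single condition field  */
             | (opcode << 21)    /* data-processing opcode      */
             | (rn << 16)        /* first operand register      */
             | (rd << 12)        /* destination register        */
             | rm;               /* second operand register     */
    }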

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Author: Aurelien Jarno <aurelien@aurel32.net>
Date:   2010-04-09 20:52:48 +02:00
Parent: 244b1e81f6
Commit: 7e0d95628d

1 file changed, 49 additions(+), 51 deletions(-)

@@ -904,8 +904,7 @@ static void *qemu_st_helpers[4] = {
 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
-static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
-                const TCGArg *args, int opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 {
     int addr_reg, data_reg, data_reg2;
 #ifdef CONFIG_SOFTMMU
@@ -1001,32 +1000,32 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
     /* TODO: move this code to where the constants pool will be */
     if (addr_reg != TCG_REG_R0) {
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
     }
 # if TARGET_LONG_BITS == 32
-    tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R1, 0, mem_index);
+    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
 # else
     if (addr_reg2 != TCG_REG_R1) {
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
     }
-    tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
+    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
 # endif
-    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
+    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
                (tcg_target_long) s->code_ptr);
     switch (opc) {
     case 0 | 4:
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(24));
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(24));
         break;
     case 1 | 4:
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(16));
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(16));
         break;
     case 0:
@@ -1034,17 +1033,17 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
     case 2:
     default:
         if (data_reg != TCG_REG_R0) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
         }
         break;
     case 3:
         if (data_reg != TCG_REG_R0) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
         }
         if (data_reg2 != TCG_REG_R1) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
         }
         break;
@@ -1099,8 +1098,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
 #endif
 }
-static inline void tcg_out_qemu_st(TCGContext *s, int cond,
-                const TCGArg *args, int opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
 {
     int addr_reg, data_reg, data_reg2;
 #ifdef CONFIG_SOFTMMU
@@ -1187,85 +1185,85 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
     /* TODO: move this code to where the constants pool will be */
     if (addr_reg != TCG_REG_R0) {
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
     }
 # if TARGET_LONG_BITS == 32
     switch (opc) {
     case 0:
-        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R1, data_reg, 0xff);
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_AND, TCG_REG_R1, data_reg, 0xff);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
         break;
     case 1:
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(16));
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R1, 0, TCG_REG_R1, SHIFT_IMM_LSR(16));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
         break;
     case 2:
         if (data_reg != TCG_REG_R1) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
         }
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
         break;
     case 3:
         if (data_reg != TCG_REG_R1) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
         }
         if (data_reg2 != TCG_REG_R2) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             TCG_REG_R2, 0, data_reg2, SHIFT_IMM_LSL(0));
         }
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
         break;
     }
 # else
     if (addr_reg2 != TCG_REG_R1) {
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
     }
     switch (opc) {
     case 0:
-        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R2, data_reg, 0xff);
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_AND, TCG_REG_R2, data_reg, 0xff);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
         break;
     case 1:
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(16));
-        tcg_out_dat_reg(s, cond, ARITH_MOV,
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                         TCG_REG_R2, 0, TCG_REG_R2, SHIFT_IMM_LSR(16));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
         break;
     case 2:
         if (data_reg != TCG_REG_R2) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
         }
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
         break;
     case 3:
-        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R8, 0, mem_index);
-        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
+        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
+        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
         if (data_reg != TCG_REG_R2) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
         }
         if (data_reg2 != TCG_REG_R3) {
-            tcg_out_dat_reg(s, cond, ARITH_MOV,
+            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                             TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
         }
         break;
     }
 # endif
-    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
+    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
                (tcg_target_long) s->code_ptr);
 # if TARGET_LONG_BITS == 64
     if (opc == 3)
-        tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
+        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
 # endif
     *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
@@ -1545,35 +1543,35 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
     case INDEX_op_qemu_ld8u:
-        tcg_out_qemu_ld(s, COND_AL, args, 0);
+        tcg_out_qemu_ld(s, args, 0);
         break;
     case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
+        tcg_out_qemu_ld(s, args, 0 | 4);
         break;
     case INDEX_op_qemu_ld16u:
-        tcg_out_qemu_ld(s, COND_AL, args, 1);
+        tcg_out_qemu_ld(s, args, 1);
         break;
     case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
+        tcg_out_qemu_ld(s, args, 1 | 4);
         break;
     case INDEX_op_qemu_ld32:
-        tcg_out_qemu_ld(s, COND_AL, args, 2);
+        tcg_out_qemu_ld(s, args, 2);
         break;
     case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, COND_AL, args, 3);
+        tcg_out_qemu_ld(s, args, 3);
         break;
     case INDEX_op_qemu_st8:
-        tcg_out_qemu_st(s, COND_AL, args, 0);
+        tcg_out_qemu_st(s, args, 0);
         break;
     case INDEX_op_qemu_st16:
-        tcg_out_qemu_st(s, COND_AL, args, 1);
+        tcg_out_qemu_st(s, args, 1);
         break;
     case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, COND_AL, args, 2);
+        tcg_out_qemu_st(s, args, 2);
         break;
     case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, COND_AL, args, 3);
+        tcg_out_qemu_st(s, args, 3);
         break;
     case INDEX_op_bswap16_i32: