mirror of https://github.com/xemu-project/xemu.git
tcg: Add INDEX_op_trunc_shr_i32
Let the backend do something special for truncation.

Signed-off-by: Richard Henderson <rth@twiddle.net>

parent 71b926992e
commit 4bb7a41ed6
@@ -314,6 +314,11 @@ This operation would be equivalent to
 
  dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00)
 
+* trunc_shr_i32 t0, t1, pos
+
+For 64-bit hosts only, right shift the 64-bit input T1 by POS and
+truncate to 32-bit output T0.  Depending on the host, this may be
+a simple mov/shift, or may require additional canonicalization.
 
 ********* Conditional moves
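As an illustrative sketch (not part of the commit), the semantics described above can be modeled in plain C; the helper name trunc_shr_i32 and the variable names below simply mirror the README wording and are not real TCG API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Host-independent model of the op: shift the 64-bit input right by
 * pos, then truncate the result to a 32-bit output. */
static uint32_t trunc_shr_i32(uint64_t t1, unsigned pos)
{
    assert(pos < 64);
    return (uint32_t)(t1 >> pos);
}

int main(void)
{
    uint64_t v = 0x123456789abcdef0ULL;
    printf("%08x\n", trunc_shr_i32(v, 0));   /* 9abcdef0: low half */
    printf("%08x\n", trunc_shr_i32(v, 32));  /* 12345678: high half */
    return 0;
}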
@@ -68,6 +68,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
@@ -99,6 +99,7 @@ extern bool have_bmi1;
 #define TCG_TARGET_HAS_mulsh_i32 0
 
 #if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
 #define TCG_TARGET_HAS_ext8s_i64 1
@@ -152,6 +152,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 
 #define TCG_TARGET_HAS_new_ldst 1
@@ -228,6 +228,7 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
     case INDEX_op_shr_i32:
         return (uint32_t)x >> (y & 31);
 
+    case INDEX_op_trunc_shr_i32:
     case INDEX_op_shr_i64:
         return (uint64_t)x >> (y & 63);
@@ -830,6 +831,10 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             }
             break;
 
+        case INDEX_op_trunc_shr_i32:
+            mask = (uint64_t)temps[args[1]].mask >> args[2];
+            break;
+
         CASE_OP_32_64(shl):
             if (temps[args[2]].state == TCG_TEMP_CONST) {
                 tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
@@ -1021,6 +1026,17 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             }
            goto do_default;
 
+        case INDEX_op_trunc_shr_i32:
+            if (temps[args[1]].state == TCG_TEMP_CONST) {
+                s->gen_opc_buf[op_index] = op_to_movi(op);
+                tmp = do_constant_folding(op, temps[args[1]].val, args[2]);
+                tcg_opt_gen_movi(gen_args, args[0], tmp);
+                gen_args += 2;
+                args += 3;
+                break;
+            }
+            goto do_default;
+
         CASE_OP_32_64(add):
         CASE_OP_32_64(sub):
         CASE_OP_32_64(mul):
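For context, a small sketch (again, not part of the commit) of what this folding computes when the shifted operand is already a known constant: the op can be replaced by a movi of the shifted, truncated value. The helper below is a hypothetical stand-in for the optimizer path, which actually goes through do_constant_folding() before emitting the movi.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: fold a trunc_shr_i32 whose input is a known
 * 64-bit constant into its 32-bit result. */
static uint32_t fold_trunc_shr_i32(uint64_t known_val, uint64_t pos)
{
    /* Same arithmetic as the shr_i64 case the op falls through to,
     * then narrowed to the op's 32-bit output width. */
    return (uint32_t)(known_val >> (pos & 63));
}

int main(void)
{
    /* A constant 0xdeadbeefcafe0000 shifted right by 48 folds to 0xdead. */
    printf("%x\n", fold_trunc_shr_i32(0xdeadbeefcafe0000ULL, 48));
    return 0;
}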
@@ -96,6 +96,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
@@ -69,6 +69,7 @@ typedef enum TCGReg {
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
@@ -117,6 +117,7 @@ typedef enum {
 #define TCG_TARGET_HAS_mulsh_i32 0
 
 #if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_rot_i64 0
tcg/tcg-op.h
@@ -1624,9 +1624,20 @@ static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
     tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
 }
 
-static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
+static inline void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg,
+                                             unsigned int count)
 {
-    tcg_gen_mov_i32(ret, TCGV_LOW(arg));
+    tcg_debug_assert(count < 64);
+    if (count >= 32) {
+        tcg_gen_shri_i32(ret, TCGV_HIGH(arg), count - 32);
+    } else if (count == 0) {
+        tcg_gen_mov_i32(ret, TCGV_LOW(arg));
+    } else {
+        TCGv_i64 t = tcg_temp_new_i64();
+        tcg_gen_shri_i64(t, arg, count);
+        tcg_gen_mov_i32(ret, TCGV_LOW(t));
+        tcg_temp_free_i64(t);
+    }
 }
 
 static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
@@ -1727,11 +1738,21 @@ static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
     }
 }
 
-/* Note: we assume the target supports move between 32 and 64 bit
-   registers.  This will probably break MIPS64 targets. */
-static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
+static inline void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg,
+                                             unsigned int count)
 {
-    tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
+    tcg_debug_assert(count < 64);
+    if (TCG_TARGET_HAS_trunc_shr_i32) {
+        tcg_gen_op3i_i32(INDEX_op_trunc_shr_i32, ret,
+                         MAKE_TCGV_I32(GET_TCGV_I64(arg)), count);
+    } else if (count == 0) {
+        tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
+    } else {
+        TCGv_i64 t = tcg_temp_new_i64();
+        tcg_gen_shri_i64(t, arg, count);
+        tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
+        tcg_temp_free_i64(t);
+    }
 }
 
 /* Note: we assume the target supports move between 32 and 64 bit
@@ -2275,18 +2296,15 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 dest, TCGv_i64 low,
     tcg_gen_deposit_i64(dest, low, high, 32, 32);
 }
 
+static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
+{
+    tcg_gen_trunc_shr_i64_i32(ret, arg, 0);
+}
+
 static inline void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
 {
-#if TCG_TARGET_REG_BITS == 32
-    tcg_gen_mov_i32(lo, TCGV_LOW(arg));
-    tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
-#else
-    TCGv_i64 t0 = tcg_temp_new_i64();
-    tcg_gen_trunc_i64_i32(lo, arg);
-    tcg_gen_shri_i64(t0, arg, 32);
-    tcg_gen_trunc_i64_i32(hi, t0);
-    tcg_temp_free_i64(t0);
-#endif
+    tcg_gen_trunc_shr_i64_i32(lo, arg, 0);
+    tcg_gen_trunc_shr_i64_i32(hi, arg, 32);
 }
 
 static inline void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
@@ -147,6 +147,10 @@ DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
 DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
 DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
 
+DEF(trunc_shr_i32, 1, 1, 1,
+    IMPL(TCG_TARGET_HAS_trunc_shr_i32)
+    | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
+
 DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
 DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
 DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
@@ -66,6 +66,7 @@ typedef uint64_t TCGRegSet;
 
 #if TCG_TARGET_REG_BITS == 32
 /* Turn some undef macros into false macros. */
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 #define TCG_TARGET_HAS_div_i64 0
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_div2_i64 0
@@ -82,6 +82,7 @@
 #define TCG_TARGET_HAS_mulsh_i32 0
 
 #if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_trunc_shr_i32 0
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1