
Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210113' into staging

Improvements to tcg constant handling.
Force utf8 for decodetree.

# gpg: Signature made Thu 14 Jan 2021 02:15:42 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210113: (24 commits)
  decodetree: Open files with encoding='utf-8'
  tcg/aarch64: Use tcg_constant_vec with tcg vec expanders
  tcg/ppc: Use tcg_constant_vec with tcg vec expanders
  tcg: Remove tcg_gen_dup{8,16,32,64}i_vec
  tcg/i386: Use tcg_constant_vec with tcg vec expanders
  tcg: Add tcg_reg_alloc_dup2
  tcg: Remove movi and dupi opcodes
  tcg/tci: Add special tci_movi_{i32,i64} opcodes
  tcg: Use tcg_constant_{i32,i64,vec} with gvec expanders
  tcg: Use tcg_constant_{i32,i64} with tcg plugins
  tcg: Use tcg_constant_{i32,i64} with tcg int expanders
  tcg: Use tcg_constant_i32 with icount expander
  tcg: Convert tcg_gen_dupi_vec to TCG_CONST
  tcg/optimize: Use tcg_constant_internal with constant folding
  tcg/optimize: Adjust TempOptInfo allocation
  tcg/optimize: Improve find_better_copy
  tcg: Introduce TYPE_CONST temporaries
  tcg: Expand TempOptInfo to 64-bits
  tcg: Rename struct tcg_temp_info to TempOptInfo
  tcg: Expand TCGTemp.val to 64-bits
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell committed on 2021-01-14 09:54:29 +00:00
commit 7c79721606
21 changed files with 890 additions and 669 deletions
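
At a glance, the series replaces the dedicated movi/dupi opcodes with interned, read-only constant temporaries. The snippet below is an illustrative sketch only, not part of the diff; it condenses the before/after pattern visible in the gen-icount.h and tcg.h hunks further down (0xdeadbeef is just the dummy value from gen-icount.h).

    /* Before this series: allocate a temp, emit a movi_i32 op, free the temp. */
    TCGv_i32 imm = tcg_temp_new_i32();
    tcg_gen_movi_i32(imm, 0xdeadbeef);
    tcg_gen_sub_i32(count, count, imm);
    tcg_temp_free_i32(imm);

    /* After: hand the expander an interned constant temp.  No movi opcode
       is emitted, and per the new tcg.h comment this kind of temporary
       "need not and should not be freed". */
    tcg_gen_sub_i32(count, count, tcg_constant_i32(0xdeadbeef));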


@@ -284,8 +284,8 @@ static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
     if (TCG_TARGET_REG_BITS == 32) {
         /* mov_i32 */
         op = copy_op(begin_op, op, INDEX_op_mov_i32);
-        /* movi_i32 */
-        op = copy_op(begin_op, op, INDEX_op_movi_i32);
+        /* mov_i32 w/ $0 */
+        op = copy_op(begin_op, op, INDEX_op_mov_i32);
     } else {
         /* extu_i32_i64 */
         op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
@@ -306,39 +306,34 @@ static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
     return op;
 }
 
-static TCGOp *copy_movi_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
-{
-    if (TCG_TARGET_REG_BITS == 32) {
-        /* 2x movi_i32 */
-        op = copy_op(begin_op, op, INDEX_op_movi_i32);
-        op->args[1] = v;
-        op = copy_op(begin_op, op, INDEX_op_movi_i32);
-        op->args[1] = v >> 32;
-    } else {
-        /* movi_i64 */
-        op = copy_op(begin_op, op, INDEX_op_movi_i64);
-        op->args[1] = v;
-    }
-    return op;
-}
-
 static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
 {
     if (UINTPTR_MAX == UINT32_MAX) {
-        /* movi_i32 */
-        op = copy_op(begin_op, op, INDEX_op_movi_i32);
-        op->args[1] = (uintptr_t)ptr;
+        /* mov_i32 */
+        op = copy_op(begin_op, op, INDEX_op_mov_i32);
+        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
     } else {
-        /* movi_i64 */
-        op = copy_movi_i64(begin_op, op, (uint64_t)(uintptr_t)ptr);
+        /* mov_i64 */
+        op = copy_op(begin_op, op, INDEX_op_mov_i64);
+        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
     }
     return op;
 }
 
 static TCGOp *copy_const_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
 {
-    return copy_movi_i64(begin_op, op, v);
+    if (TCG_TARGET_REG_BITS == 32) {
+        /* 2x mov_i32 */
+        op = copy_op(begin_op, op, INDEX_op_mov_i32);
+        op->args[1] = tcgv_i32_arg(tcg_constant_i32(v));
+        op = copy_op(begin_op, op, INDEX_op_mov_i32);
+        op->args[1] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
+    } else {
+        /* mov_i64 */
+        op = copy_op(begin_op, op, INDEX_op_mov_i64);
+        op->args[1] = tcgv_i64_arg(tcg_constant_i64(v));
+    }
+    return op;
 }
 
 static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
@@ -486,8 +481,8 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
     tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);
 
-    /* const_i32 == movi_i32 ("info", so it remains as is) */
-    op = copy_op(&begin_op, op, INDEX_op_movi_i32);
+    /* const_i32 == mov_i32 ("info", so it remains as is) */
+    op = copy_op(&begin_op, op, INDEX_op_mov_i32);
 
     /* const_ptr */
     op = copy_const_ptr(&begin_op, op, cb->userp);


@@ -34,7 +34,7 @@ static inline void gen_io_end(void)
 
 static inline void gen_tb_start(const TranslationBlock *tb)
 {
-    TCGv_i32 count, imm;
+    TCGv_i32 count;
 
     tcg_ctx->exitreq_label = gen_new_label();
     if (tb_cflags(tb) & CF_USE_ICOUNT) {
@@ -48,15 +48,13 @@ static inline void gen_tb_start(const TranslationBlock *tb)
                    offsetof(ArchCPU, env));
 
     if (tb_cflags(tb) & CF_USE_ICOUNT) {
-        imm = tcg_temp_new_i32();
-        /* We emit a movi with a dummy immediate argument. Keep the insn index
-         * of the movi so that we later (when we know the actual insn count)
-         * can update the immediate argument with the actual insn count. */
-        tcg_gen_movi_i32(imm, 0xdeadbeef);
+        /*
+         * We emit a sub with a dummy immediate argument. Keep the insn index
+         * of the sub so that we later (when we know the actual insn count)
+         * can update the argument with the actual insn count.
+         */
+        tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
         icount_start_insn = tcg_last_op();
-        tcg_gen_sub_i32(count, count, imm);
-        tcg_temp_free_i32(imm);
     }
 
     tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
@@ -74,9 +72,12 @@ static inline void gen_tb_start(const TranslationBlock *tb)
 static inline void gen_tb_end(const TranslationBlock *tb, int num_insns)
 {
     if (tb_cflags(tb) & CF_USE_ICOUNT) {
-        /* Update the num_insn immediate parameter now that we know
-         * the actual insn count. */
-        tcg_set_insn_param(icount_start_insn, 1, num_insns);
+        /*
+         * Update the num_insn immediate parameter now that we know
+         * the actual insn count.
+         */
+        tcg_set_insn_param(icount_start_insn, 2,
+                           tcgv_i32_arg(tcg_constant_i32(num_insns)));
     }
 
     gen_set_label(tcg_ctx->exitreq_label);
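
An editorial note on the hunk above (not diff content): the insn-param index changes from 1 to 2 because the patched operation changes. For movi_i32 the dummy immediate was operand 1 (args: ret, imm); for sub_i32 the constant sits at operand 2 (args: ret, in1, in2), and what gets patched in is now a constant temp's argument rather than a raw immediate:

    /* old: movi_i32 imm, 0xdeadbeef      args[0]=imm, args[1]=immediate   */
    tcg_set_insn_param(icount_start_insn, 1, num_insns);

    /* new: sub_i32 count, count, $0      args[0]=count, args[1]=count, args[2]=$0 */
    tcg_set_insn_param(icount_start_insn, 2,
                       tcgv_i32_arg(tcg_constant_i32(num_insns)));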


@@ -271,6 +271,7 @@ void tcg_gen_mb(TCGBar);
 
 /* 32 bit ops */
 
+void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg);
 void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
 void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
 void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
@@ -349,11 +350,6 @@ static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
     }
 }
 
-static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
-{
-    tcg_gen_op2i_i32(INDEX_op_movi_i32, ret, arg);
-}
-
 static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2,
                                     tcg_target_long offset)
 {
@@ -467,6 +463,7 @@ static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
 
 /* 64 bit ops */
 
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
 void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
 void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
 void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
@@ -550,11 +547,6 @@ static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
     }
 }
 
-static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
-{
-    tcg_gen_op2i_i64(INDEX_op_movi_i64, ret, arg);
-}
-
 static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                     tcg_target_long offset)
 {
@@ -698,7 +690,6 @@ static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 
 void tcg_gen_discard_i64(TCGv_i64 arg);
 void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
 void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
 void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
 void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
@@ -968,10 +959,6 @@ void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
 void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
 void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
 void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
-void tcg_gen_dup8i_vec(TCGv_vec, uint32_t);
-void tcg_gen_dup16i_vec(TCGv_vec, uint32_t);
-void tcg_gen_dup32i_vec(TCGv_vec, uint32_t);
-void tcg_gen_dup64i_vec(TCGv_vec, uint64_t);
 void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t);
 void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
 void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);


@@ -45,7 +45,6 @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END)
 DEF(mb, 0, 0, 1, 0)
 
 DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
-DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(setcond_i32, 1, 2, 1, 0)
 DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32))
 /* load/store */
@@ -111,7 +110,6 @@ DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
 DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
 
 DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
-DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
 DEF(setcond_i64, 1, 2, 1, IMPL64)
 DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64))
 /* load/store */
@@ -221,7 +219,6 @@ DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1,
 #define IMPLVEC  TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
 
 DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
-DEF(dupi_vec, 1, 0, 1, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
 
 DEF(dup_vec, 1, 1, 0, IMPLVEC)
 DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))
@@ -278,6 +275,14 @@ DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
 #include "tcg-target.opc.h"
 #endif
 
+#ifdef TCG_TARGET_INTERPRETER
+/* These opcodes are only for use between the tci generator and interpreter. */
+DEF(tci_movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+#if TCG_TARGET_REG_BITS == 64
+DEF(tci_movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
+#endif
+#endif
+
 #undef TLADDR_ARGS
 #undef DATA64_ARGS
 #undef IMPL


@@ -483,26 +483,32 @@ typedef enum TCGTempVal {
     TEMP_VAL_CONST,
 } TCGTempVal;
 
+typedef enum TCGTempKind {
+    /* Temp is dead at the end of all basic blocks. */
+    TEMP_NORMAL,
+    /* Temp is saved across basic blocks but dead at the end of TBs. */
+    TEMP_LOCAL,
+    /* Temp is saved across both basic blocks and translation blocks. */
+    TEMP_GLOBAL,
+    /* Temp is in a fixed register. */
+    TEMP_FIXED,
+    /* Temp is a fixed constant. */
+    TEMP_CONST,
+} TCGTempKind;
+
 typedef struct TCGTemp {
     TCGReg reg:8;
     TCGTempVal val_type:8;
     TCGType base_type:8;
     TCGType type:8;
-    unsigned int fixed_reg:1;
+    TCGTempKind kind:3;
     unsigned int indirect_reg:1;
     unsigned int indirect_base:1;
     unsigned int mem_coherent:1;
     unsigned int mem_allocated:1;
-    /* If true, the temp is saved across both basic blocks and
-       translation blocks.  */
-    unsigned int temp_global:1;
-    /* If true, the temp is saved across basic blocks but dead
-       at the end of translation blocks.  If false, the temp is
-       dead at the end of basic blocks.  */
-    unsigned int temp_local:1;
     unsigned int temp_allocated:1;
 
-    tcg_target_long val;
+    int64_t val;
     struct TCGTemp *mem_base;
     intptr_t mem_offset;
     const char *name;
@@ -661,6 +667,7 @@ struct TCGContext {
     QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
 #endif
 
+    GHashTable *const_table[TCG_TYPE_COUNT];
     TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
     TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
 
@@ -675,6 +682,11 @@ struct TCGContext {
     target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
 };
 
+static inline bool temp_readonly(TCGTemp *ts)
+{
+    return ts->kind >= TEMP_FIXED;
+}
+
 extern TCGContext tcg_init_ctx;
 extern __thread TCGContext *tcg_ctx;
 extern const void *tcg_code_gen_epilogue;
@@ -1070,6 +1082,7 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
 
 void tcg_optimize(TCGContext *s);
 
+/* Allocate a new temporary and initialize it with a constant. */
 TCGv_i32 tcg_const_i32(int32_t val);
 TCGv_i64 tcg_const_i64(int64_t val);
 TCGv_i32 tcg_const_local_i32(int32_t val);
@@ -1079,6 +1092,25 @@ TCGv_vec tcg_const_ones_vec(TCGType);
 TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
 TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
 
+/*
+ * Locate or create a read-only temporary that is a constant.
+ * This kind of temporary need not and should not be freed.
+ */
+TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
+
+static inline TCGv_i32 tcg_constant_i32(int32_t val)
+{
+    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
+}
+
+static inline TCGv_i64 tcg_constant_i64(int64_t val)
+{
+    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
+}
+
+TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
+TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
+
 #if UINTPTR_MAX == UINT32_MAX
 # define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
 # define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
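
The tcg.c side of tcg_constant_internal is not among the hunks shown on this page. Based purely on the declarations above (the per-type const_table, TEMP_CONST, and the "locate or create" comment), a minimal sketch of the likely shape follows; treat it as an assumption, not the actual implementation, and note that tcg_temp_alloc is used here as a hypothetical allocation helper.

    TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
    {
        GHashTable *h = tcg_ctx->const_table[type];
        TCGTemp *ts;

        if (h == NULL) {
            /* One interning table per TCGType, created on demand. */
            h = g_hash_table_new(g_int64_hash, g_int64_equal);
            tcg_ctx->const_table[type] = h;
        }
        ts = g_hash_table_lookup(h, &val);
        if (ts == NULL) {
            ts = tcg_temp_alloc(tcg_ctx);   /* hypothetical helper */
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            g_hash_table_insert(h, &ts->val, ts);
        }
        return ts;
    }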


@@ -20,6 +20,7 @@
 # See the syntax and semantics in docs/devel/decodetree.rst.
 #
 
+import io
 import os
 import re
 import sys
@@ -1304,7 +1305,7 @@ def main():
 
     for filename in args:
         input_file = filename
-        f = open(filename, 'r')
+        f = open(filename, 'rt', encoding='utf-8')
         parse_file(f, toppat)
         f.close()
 
@@ -1324,9 +1325,11 @@ def main():
         prop_size(stree)
 
     if output_file:
-        output_fd = open(output_file, 'w')
+        output_fd = open(output_file, 'wt', encoding='utf-8')
     else:
-        output_fd = sys.stdout
+        output_fd = io.TextIOWrapper(sys.stdout.buffer,
+                                     encoding=sys.stdout.encoding,
+                                     errors="ignore")
 
     output_autogen()
     for n in sorted(arguments.keys()):


@@ -857,14 +857,14 @@ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
     tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
 }
 
-static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
-                             TCGReg rd, tcg_target_long v64)
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg rd, int64_t v64)
 {
     bool q = type == TCG_TYPE_V128;
     int cmode, imm8, i;
 
     /* Test all bytes equal first.  */
-    if (v64 == dup_const(MO_8, v64)) {
+    if (vece == MO_8) {
         imm8 = (uint8_t)v64;
         tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8);
         return;
@@ -891,7 +891,7 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
      * cannot find an expansion there's no point checking a larger
      * width because we already know by replication it cannot match.
      */
-    if (v64 == dup_const(MO_16, v64)) {
+    if (vece == MO_16) {
         uint16_t v16 = v64;
 
         if (is_shimm16(v16, &cmode, &imm8)) {
@@ -910,7 +910,7 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
         tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff);
         tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8);
         return;
-    } else if (v64 == dup_const(MO_32, v64)) {
+    } else if (vece == MO_32) {
         uint32_t v32 = v64;
         uint32_t n32 = ~v32;
 
@@ -1011,13 +1011,6 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     case TCG_TYPE_I64:
         tcg_debug_assert(rd < 32);
         break;
-
-    case TCG_TYPE_V64:
-    case TCG_TYPE_V128:
-        tcg_debug_assert(rd >= 32);
-        tcg_out_dupi_vec(s, type, rd, value);
-        return;
-
     default:
         g_assert_not_reached();
     }
@@ -2264,8 +2257,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         g_assert_not_reached();
@@ -2442,7 +2433,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
             tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
             break;
         }
-        tcg_out_dupi_vec(s, type, TCG_VEC_TMP, 0);
+        tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
         a2 = TCG_VEC_TMP;
     }
     insn = cmp_insn[cond];
@@ -2473,7 +2464,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
-    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi.  */
     case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
     default:
         g_assert_not_reached();
@@ -2526,7 +2516,7 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
     va_list va;
-    TCGv_vec v0, v1, v2, t1, t2;
+    TCGv_vec v0, v1, v2, t1, t2, c1;
     TCGArg a2;
 
     va_start(va, a0);
@@ -2558,8 +2548,8 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
 
     case INDEX_op_rotlv_vec:
         t1 = tcg_temp_new_vec(type);
-        tcg_gen_dupi_vec(vece, t1, 8 << vece);
-        tcg_gen_sub_vec(vece, t1, v2, t1);
+        c1 = tcg_constant_vec(type, vece, 8 << vece);
+        tcg_gen_sub_vec(vece, t1, v2, c1);
         /* Right shifts are negative left shifts for AArch64.  */
         vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
                   tcgv_vec_arg(v1), tcgv_vec_arg(t1));
@@ -2572,9 +2562,9 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
     case INDEX_op_rotrv_vec:
         t1 = tcg_temp_new_vec(type);
         t2 = tcg_temp_new_vec(type);
+        c1 = tcg_constant_vec(type, vece, 8 << vece);
         tcg_gen_neg_vec(vece, t1, v2);
-        tcg_gen_dupi_vec(vece, t2, 8 << vece);
-        tcg_gen_add_vec(vece, t2, t1, t2);
+        tcg_gen_sub_vec(vece, t2, c1, v2);
         /* Right shifts are negative left shifts for AArch64.  */
         vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
                   tcgv_vec_arg(v1), tcgv_vec_arg(t1));


@@ -2068,7 +2068,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();


@@ -942,8 +942,8 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
     return true;
 }
 
-static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
-                             TCGReg ret, tcg_target_long arg)
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg ret, int64_t arg)
 {
     int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
 
@@ -956,7 +956,14 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
         return;
     }
 
-    if (TCG_TARGET_REG_BITS == 64) {
+    if (TCG_TARGET_REG_BITS == 32 && vece < MO_64) {
+        if (have_avx2) {
+            tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
+        } else {
+            tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret);
+        }
+        new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
+    } else {
         if (type == TCG_TYPE_V64) {
             tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
         } else if (have_avx2) {
@@ -964,41 +971,40 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
         } else {
             tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
         }
-        new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
-    } else {
-        if (have_avx2) {
-            tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
-        } else {
-            tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret);
-        }
-        new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
+        if (TCG_TARGET_REG_BITS == 64) {
+            new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
+        } else {
+            new_pool_l2(s, R_386_32, s->code_ptr - 4, 0, arg, arg >> 32);
+        }
+    }
+}
+
+static void tcg_out_movi_vec(TCGContext *s, TCGType type,
+                             TCGReg ret, tcg_target_long arg)
+{
+    if (arg == 0) {
+        tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
+        return;
+    }
+    if (arg == -1) {
+        tcg_out_vex_modrm(s, OPC_PCMPEQB, ret, ret, ret);
+        return;
+    }
+
+    int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
+    tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy + rexw, ret);
+    if (TCG_TARGET_REG_BITS == 64) {
+        new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
+    } else {
+        new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
     }
 }
 
-static void tcg_out_movi(TCGContext *s, TCGType type,
-                         TCGReg ret, tcg_target_long arg)
+static void tcg_out_movi_int(TCGContext *s, TCGType type,
+                             TCGReg ret, tcg_target_long arg)
 {
     tcg_target_long diff;
 
-    switch (type) {
-    case TCG_TYPE_I32:
-#if TCG_TARGET_REG_BITS == 64
-    case TCG_TYPE_I64:
-#endif
-        if (ret < 16) {
-            break;
-        }
-        /* fallthru */
-    case TCG_TYPE_V64:
-    case TCG_TYPE_V128:
-    case TCG_TYPE_V256:
-        tcg_debug_assert(ret >= 16);
-        tcg_out_dupi_vec(s, type, ret, arg);
-        return;
-    default:
-        g_assert_not_reached();
-    }
-
     if (arg == 0) {
         tgen_arithr(s, ARITH_XOR, ret, ret);
         return;
@@ -1027,6 +1033,25 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     tcg_out64(s, arg);
 }
 
+static void tcg_out_movi(TCGContext *s, TCGType type,
+                         TCGReg ret, tcg_target_long arg)
+{
+    switch (type) {
+    case TCG_TYPE_I32:
+#if TCG_TARGET_REG_BITS == 64
+    case TCG_TYPE_I64:
+#endif
+        if (ret < 16) {
+            tcg_out_movi_int(s, type, ret, arg);
+        } else {
+            tcg_out_movi_vec(s, type, ret, arg);
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
 {
     if (val == (int8_t)val) {
@@ -2641,8 +2666,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();
@@ -2928,7 +2951,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
-    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi.  */
     case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
     default:
         g_assert_not_reached();
@@ -3464,7 +3486,7 @@ static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
 static void expand_vec_mul(TCGType type, unsigned vece,
                            TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
 {
-    TCGv_vec t1, t2, t3, t4;
+    TCGv_vec t1, t2, t3, t4, zero;
 
     tcg_debug_assert(vece == MO_8);
 
@@ -3482,11 +3504,11 @@ static void expand_vec_mul(TCGType type, unsigned vece,
     case TCG_TYPE_V64:
         t1 = tcg_temp_new_vec(TCG_TYPE_V128);
         t2 = tcg_temp_new_vec(TCG_TYPE_V128);
-        tcg_gen_dup16i_vec(t2, 0);
+        zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);
         vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
-                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(t2));
+                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
         vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
-                  tcgv_vec_arg(t2), tcgv_vec_arg(t2), tcgv_vec_arg(v2));
+                  tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
         tcg_gen_mul_vec(MO_16, t1, t1, t2);
         tcg_gen_shri_vec(MO_16, t1, t1, 8);
         vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
@@ -3501,15 +3523,15 @@ static void expand_vec_mul(TCGType type, unsigned vece,
         t2 = tcg_temp_new_vec(type);
         t3 = tcg_temp_new_vec(type);
         t4 = tcg_temp_new_vec(type);
-        tcg_gen_dup16i_vec(t4, 0);
+        zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);
         vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
-                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(t4));
+                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
         vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
-                  tcgv_vec_arg(t2), tcgv_vec_arg(t4), tcgv_vec_arg(v2));
+                  tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
         vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
-                  tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(t4));
+                  tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
         vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
-                  tcgv_vec_arg(t4), tcgv_vec_arg(t4), tcgv_vec_arg(v2));
+                  tcgv_vec_arg(t4), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
         tcg_gen_mul_vec(MO_16, t1, t1, t2);
         tcg_gen_mul_vec(MO_16, t3, t3, t4);
         tcg_gen_shri_vec(MO_16, t1, t1, 8);
@@ -3537,7 +3559,7 @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
         NEED_UMIN = 8,
         NEED_UMAX = 16,
     };
-    TCGv_vec t1, t2;
+    TCGv_vec t1, t2, t3;
     uint8_t fixup;
 
     switch (cond) {
@@ -3608,9 +3630,9 @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
     } else if (fixup & NEED_BIAS) {
         t1 = tcg_temp_new_vec(type);
         t2 = tcg_temp_new_vec(type);
-        tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1));
-        tcg_gen_sub_vec(vece, t1, v1, t2);
-        tcg_gen_sub_vec(vece, t2, v2, t2);
+        t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1));
+        tcg_gen_sub_vec(vece, t1, v1, t3);
+        tcg_gen_sub_vec(vece, t2, v2, t3);
         v1 = t1;
         v2 = t2;
         cond = tcg_signed_cond(cond);


@@ -2141,8 +2141,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();


@@ -35,20 +35,20 @@
         glue(glue(case INDEX_op_, x), _i64):    \
         glue(glue(case INDEX_op_, x), _vec)
 
-struct tcg_temp_info {
+typedef struct TempOptInfo {
     bool is_const;
     TCGTemp *prev_copy;
     TCGTemp *next_copy;
-    tcg_target_ulong val;
-    tcg_target_ulong mask;
-};
+    uint64_t val;
+    uint64_t mask;
+} TempOptInfo;
 
-static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
+static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
 }
 
-static inline struct tcg_temp_info *arg_info(TCGArg arg)
+static inline TempOptInfo *arg_info(TCGArg arg)
 {
     return ts_info(arg_temp(arg));
 }
@@ -71,9 +71,9 @@ static inline bool ts_is_copy(TCGTemp *ts)
 /* Reset TEMP's state, possibly removing the temp for the list of copies.  */
 static void reset_ts(TCGTemp *ts)
 {
-    struct tcg_temp_info *ti = ts_info(ts);
-    struct tcg_temp_info *pi = ts_info(ti->prev_copy);
-    struct tcg_temp_info *ni = ts_info(ti->next_copy);
+    TempOptInfo *ti = ts_info(ts);
+    TempOptInfo *pi = ts_info(ti->prev_copy);
+    TempOptInfo *ni = ts_info(ti->next_copy);
 
     ni->prev_copy = ti->prev_copy;
     pi->next_copy = ti->next_copy;
@@ -89,55 +89,67 @@ static void reset_temp(TCGArg arg)
 }
 
 /* Initialize and activate a temporary.  */
-static void init_ts_info(struct tcg_temp_info *infos,
-                         TCGTempSet *temps_used, TCGTemp *ts)
+static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
 {
     size_t idx = temp_idx(ts);
-    if (!test_bit(idx, temps_used->l)) {
-        struct tcg_temp_info *ti = &infos[idx];
+    TempOptInfo *ti;
 
+    if (test_bit(idx, temps_used->l)) {
+        return;
+    }
+    set_bit(idx, temps_used->l);
+
+    ti = ts->state_ptr;
+    if (ti == NULL) {
+        ti = tcg_malloc(sizeof(TempOptInfo));
         ts->state_ptr = ti;
-        ti->next_copy = ts;
-        ti->prev_copy = ts;
+    }
+
+    ti->next_copy = ts;
+    ti->prev_copy = ts;
+    if (ts->kind == TEMP_CONST) {
+        ti->is_const = true;
+        ti->val = ts->val;
+        ti->mask = ts->val;
+        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
+            /* High bits of a 32-bit quantity are garbage.  */
+            ti->mask |= ~0xffffffffull;
+        }
+    } else {
         ti->is_const = false;
         ti->mask = -1;
-        set_bit(idx, temps_used->l);
     }
 }
 
-static void init_arg_info(struct tcg_temp_info *infos,
-                          TCGTempSet *temps_used, TCGArg arg)
+static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
 {
-    init_ts_info(infos, temps_used, arg_temp(arg));
+    init_ts_info(temps_used, arg_temp(arg));
 }
 
 static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
 {
-    TCGTemp *i;
+    TCGTemp *i, *g, *l;
 
-    /* If this is already a global, we can't do better.  */
-    if (ts->temp_global) {
+    /* If this is already readonly, we can't do better.  */
+    if (temp_readonly(ts)) {
         return ts;
     }
 
-    /* Search for a global first.  */
+    g = l = NULL;
     for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
-        if (i->temp_global) {
+        if (temp_readonly(i)) {
             return i;
-        }
-    }
-
-    /* If it is a temp, search for a temp local.  */
-    if (!ts->temp_local) {
-        for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
-            if (ts->temp_local) {
-                return i;
+        } else if (i->kind > ts->kind) {
+            if (i->kind == TEMP_GLOBAL) {
+                g = i;
+            } else if (i->kind == TEMP_LOCAL) {
+                l = i;
             }
         }
     }
 
-    /* Failure to find a better representation, return the same temp.  */
-    return ts;
+    /* If we didn't find a better representation, return the same temp.  */
+    return g ? g : l ? l : ts;
 }
 
 static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
@@ -166,45 +178,14 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
     return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }
 
-static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
-{
-    const TCGOpDef *def;
-    TCGOpcode new_op;
-    tcg_target_ulong mask;
-    struct tcg_temp_info *di = arg_info(dst);
-
-    def = &tcg_op_defs[op->opc];
-    if (def->flags & TCG_OPF_VECTOR) {
-        new_op = INDEX_op_dupi_vec;
-    } else if (def->flags & TCG_OPF_64BIT) {
-        new_op = INDEX_op_movi_i64;
-    } else {
-        new_op = INDEX_op_movi_i32;
-    }
-    op->opc = new_op;
-    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
-    op->args[0] = dst;
-    op->args[1] = val;
-
-    reset_temp(dst);
-    di->is_const = true;
-    di->val = val;
-    mask = val;
-    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
-        /* High bits of the destination are now garbage.  */
-        mask |= ~0xffffffffull;
-    }
-    di->mask = mask;
-}
-
 static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
 {
     TCGTemp *dst_ts = arg_temp(dst);
     TCGTemp *src_ts = arg_temp(src);
     const TCGOpDef *def;
-    struct tcg_temp_info *di;
-    struct tcg_temp_info *si;
-    tcg_target_ulong mask;
+    TempOptInfo *di;
+    TempOptInfo *si;
+    uint64_t mask;
     TCGOpcode new_op;
 
     if (ts_are_copies(dst_ts, src_ts)) {
@@ -236,7 +217,7 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
     di->mask = mask;
 
     if (src_ts->type == dst_ts->type) {
-        struct tcg_temp_info *ni = ts_info(si->next_copy);
+        TempOptInfo *ni = ts_info(si->next_copy);
 
         di->next_copy = si->next_copy;
         di->prev_copy = src_ts;
@@ -247,7 +228,28 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
     }
 }
 
-static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
+static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
+                             TCGOp *op, TCGArg dst, uint64_t val)
+{
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGType type;
+    TCGTemp *tv;
+
+    if (def->flags & TCG_OPF_VECTOR) {
+        type = TCGOP_VECL(op) + TCG_TYPE_V64;
+    } else if (def->flags & TCG_OPF_64BIT) {
+        type = TCG_TYPE_I64;
+    } else {
+        type = TCG_TYPE_I32;
+    }
+
+    /* Convert movi to mov with constant temp. */
+    tv = tcg_constant_internal(type, val);
+    init_ts_info(temps_used, tv);
+    tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
+}
+
+static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
 {
     uint64_t l64, h64;
 
@@ -410,10 +412,10 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
     }
 }
 
-static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
+static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
 {
     const TCGOpDef *def = &tcg_op_defs[op];
-    TCGArg res = do_constant_folding_2(op, x, y);
+    uint64_t res = do_constant_folding_2(op, x, y);
     if (!(def->flags & TCG_OPF_64BIT)) {
         res = (int32_t)res;
     }
@@ -501,8 +503,9 @@ static bool do_constant_folding_cond_eq(TCGCond c)
 static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                        TCGArg y, TCGCond c)
 {
-    tcg_target_ulong xv = arg_info(x)->val;
-    tcg_target_ulong yv = arg_info(y)->val;
+    uint64_t xv = arg_info(x)->val;
+    uint64_t yv = arg_info(y)->val;
+
     if (arg_is_const(x) && arg_is_const(y)) {
         const TCGOpDef *def = &tcg_op_defs[op];
         tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
@@ -597,9 +600,8 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
-    int nb_temps, nb_globals;
+    int nb_temps, nb_globals, i;
     TCGOp *op, *op_next, *prev_mb = NULL;
-    struct tcg_temp_info *infos;
     TCGTempSet temps_used;
 
     /* Array VALS has an element for each temp.
@@ -609,13 +611,15 @@ void tcg_optimize(TCGContext *s)
 
     nb_temps = s->nb_temps;
     nb_globals = s->nb_globals;
-    bitmap_zero(temps_used.l, nb_temps);
-    infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps);
+
+    memset(&temps_used, 0, sizeof(temps_used));
+    for (i = 0; i < nb_temps; ++i) {
+        s->temps[i].state_ptr = NULL;
+    }
 
     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
-        tcg_target_ulong mask, partmask, affected;
-        int nb_oargs, nb_iargs, i;
-        TCGArg tmp;
+        uint64_t mask, partmask, affected, tmp;
+        int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
         const TCGOpDef *def = &tcg_op_defs[opc];
 
@@ -627,14 +631,14 @@ void tcg_optimize(TCGContext *s)
             for (i = 0; i < nb_oargs + nb_iargs; i++) {
                 TCGTemp *ts = arg_temp(op->args[i]);
                 if (ts) {
-                    init_ts_info(infos, &temps_used, ts);
+                    init_ts_info(&temps_used, ts);
                 }
             }
         } else {
             nb_oargs = def->nb_oargs;
             nb_iargs = def->nb_iargs;
             for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                init_arg_info(infos, &temps_used, op->args[i]);
+                init_arg_info(&temps_used, op->args[i]);
             }
         }
 
@@ -713,7 +717,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(rotr):
             if (arg_is_const(op->args[1])
                 && arg_info(op->args[1])->val == 0) {
-                tcg_opt_gen_movi(s, op, op->args[0], 0);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
                 continue;
             }
             break;
@@ -1040,7 +1044,7 @@ void tcg_optimize(TCGContext *s)
 
         if (partmask == 0) {
             tcg_debug_assert(nb_oargs == 1);
-            tcg_opt_gen_movi(s, op, op->args[0], 0);
+            tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
             continue;
         }
         if (affected == 0) {
@@ -1057,7 +1061,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mulsh):
             if (arg_is_const(op->args[2])
                 && arg_info(op->args[2])->val == 0) {
-                tcg_opt_gen_movi(s, op, op->args[0], 0);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
                 continue;
             }
             break;
@@ -1084,7 +1088,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(sub):
         CASE_OP_32_64_VEC(xor):
             if (args_are_copies(op->args[1], op->args[2])) {
-                tcg_opt_gen_movi(s, op, op->args[0], 0);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
                 continue;
             }
             break;
@@ -1099,16 +1103,12 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(mov):
             tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
             break;
-        CASE_OP_32_64(movi):
-        case INDEX_op_dupi_vec:
-            tcg_opt_gen_movi(s, op, op->args[0], op->args[1]);
-            break;
 
         case INDEX_op_dup_vec:
             if (arg_is_const(op->args[1])) {
                 tmp = arg_info(op->args[1])->val;
                 tmp = dup_const(TCGOP_VECE(op), tmp);
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1116,11 +1116,10 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_dup2_vec:
             assert(TCG_TARGET_REG_BITS == 32);
             if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[1])->val;
-                if (tmp == arg_info(op->args[2])->val) {
-                    tcg_opt_gen_movi(s, op, op->args[0], tmp);
-                    break;
-                }
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
+                                 deposit64(arg_info(op->args[1])->val, 32, 32,
+                                           arg_info(op->args[2])->val));
+                break;
             } else if (args_are_copies(op->args[1], op->args[2])) {
                 op->opc = INDEX_op_dup_vec;
                 TCGOP_VECE(op) = MO_32;
@@ -1146,7 +1145,7 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             if (arg_is_const(op->args[1])) {
                 tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1176,7 +1175,7 @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                 tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                           arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1187,7 +1186,7 @@ void tcg_optimize(TCGContext *s)
                 TCGArg v = arg_info(op->args[1])->val;
                 if (v != 0) {
                     tmp = do_constant_folding(opc, v, 0);
-                    tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                    tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 } else {
                     tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
                 }
@@ -1200,7 +1199,7 @@ void tcg_optimize(TCGContext *s)
                 tmp = deposit64(arg_info(op->args[1])->val,
                                 op->args[3], op->args[4],
                                 arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1209,7 +1208,7 @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1])) {
                 tmp = extract64(arg_info(op->args[1])->val,
                                 op->args[2], op->args[3]);
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1218,23 +1217,24 @@ void tcg_optimize(TCGContext *s)
             if (arg_is_const(op->args[1])) {
                 tmp = sextract64(arg_info(op->args[1])->val,
                                  op->args[2], op->args[3]);
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
 
         CASE_OP_32_64(extract2):
             if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                TCGArg v1 = arg_info(op->args[1])->val;
-                TCGArg v2 = arg_info(op->args[2])->val;
+                uint64_t v1 = arg_info(op->args[1])->val;
+                uint64_t v2 = arg_info(op->args[2])->val;
+                int shr = op->args[3];
 
                 if (opc == INDEX_op_extract2_i64) {
-                    tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
+                    tmp = (v1 >> shr) | (v2 << (64 - shr));
                 } else {
-                    tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
-                                    ((uint32_t)v2 << (32 - op->args[3])));
+                    tmp = (int32_t)(((uint32_t)v1 >> shr) |
                                    ((uint32_t)v2 << (32 - shr)));
                 }
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1243,7 +1243,7 @@ void tcg_optimize(TCGContext *s)
             tmp = do_constant_folding_cond(opc, op->args[1],
                                            op->args[2], op->args[3]);
             if (tmp != 2) {
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
                 break;
             }
             goto do_default;
@@ -1253,7 +1253,7 @@ void tcg_optimize(TCGContext *s)
                                            op->args[1], op->args[2]);
             if (tmp != 2) {
                 if (tmp) {
-                    bitmap_zero(temps_used.l, nb_temps);
+                    memset(&temps_used, 0, sizeof(temps_used));
                     op->opc = INDEX_op_br;
                     op->args[0] = op->args[3];
                 } else {
@@ -1271,9 +1271,10 @@ void tcg_optimize(TCGContext *s)
                 break;
             }
             if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
-                tcg_target_ulong tv = arg_info(op->args[3])->val;
-                tcg_target_ulong fv = arg_info(op->args[4])->val;
+                uint64_t tv = arg_info(op->args[3])->val;
+                uint64_t fv = arg_info(op->args[4])->val;
                 TCGCond cond = op->args[5];
+
                 if (fv == 1 && tv == 0) {
                     cond = tcg_invert_cond(cond);
                 } else if (!(tv == 1 && fv == 0)) {
@@ -1298,7 +1299,7 @@ void tcg_optimize(TCGContext *s)
                 uint64_t a = ((uint64_t)ah << 32) | al;
                 uint64_t b = ((uint64_t)bh << 32) | bl;
                 TCGArg rl, rh;
-                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);
+                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
 
                 if (opc == INDEX_op_add2_i32) {
                     a += b;
@@ -1308,8 +1309,8 @@ void tcg_optimize(TCGContext *s)
 
                 rl = op->args[0];
                 rh = op->args[1];
-                tcg_opt_gen_movi(s, op, rl, (int32_t)a);
-                tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
+                tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
+                tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
                 break;
             }
             goto do_default;
@@ -1320,12 +1321,12 @@ void tcg_optimize(TCGContext *s)
                 uint32_t b = arg_info(op->args[3])->val;
                 uint64_t r = (uint64_t)a * b;
                 TCGArg rl, rh;
-                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);
+                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
 
                 rl = op->args[0];
                 rh = op->args[1];
-                tcg_opt_gen_movi(s, op, rl, (int32_t)r);
-                tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));
+                tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
+                tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
                 break;
             }
             goto do_default;
@@ -1336,7 +1337,7 @@ void tcg_optimize(TCGContext *s)
             if (tmp != 2) {
                 if (tmp) {
         do_brcond_true:
-                    bitmap_zero(temps_used.l, nb_temps);
+                    memset(&temps_used, 0, sizeof(temps_used));
                     op->opc = INDEX_op_br;
                     op->args[0] = op->args[5];
                 } else {
@@ -1352,7 +1353,7 @@ void tcg_optimize(TCGContext *s)
                 /* Simplify LT/GE comparisons vs zero to a single compare
                    vs the high word of the input.  */
         do_brcond_high:
-                bitmap_zero(temps_used.l, nb_temps);
+                memset(&temps_used, 0, sizeof(temps_used));
                 op->opc = INDEX_op_brcond_i32;
                 op->args[0] = op->args[1];
                 op->args[1] = op->args[3];
@@ -1378,7 +1379,7 @@ void tcg_optimize(TCGContext *s)
                     goto do_default;
                 }
         do_brcond_low:
-                bitmap_zero(temps_used.l, nb_temps);
+                memset(&temps_used, 0, sizeof(temps_used));
                 op->opc = INDEX_op_brcond_i32;
                 op->args[1] = op->args[2];
                 op->args[2] = op->args[4];
@@ -1413,7 +1414,7 @@ void tcg_optimize(TCGContext *s)
                                                 op->args[5]);
             if (tmp != 2) {
             do_setcond_const:
-                tcg_opt_gen_movi(s, op, op->args[0], tmp);
+                tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
             } else if ((op->args[5] == TCG_COND_LT
                         || op->args[5] == TCG_COND_GE)
                        && arg_is_const(op->args[3])
@@ -1498,7 +1499,7 @@ void tcg_optimize(TCGContext *s)
                block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg.  */
             if (def->flags & TCG_OPF_BB_END) {
-                bitmap_zero(temps_used.l, nb_temps);
+                memset(&temps_used, 0, sizeof(temps_used));
            } else {
         do_reset_output:
                 for (i = 0; i < nb_oargs; i++) {
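
To make the tcg_opt_gen_movi rework above concrete, a hedged illustration of the new flow (names taken from the hunks; the surrounding dispatch loop is elided): folding no longer rewrites an op into movi with an inline immediate, it rewrites it into a plain mov whose source argument is an interned constant temp, so copy propagation can treat the folded value like any other temp.

    /* e.g. an add with two known-constant inputs ... */
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                           arg_info(op->args[2])->val);
        /* ... becomes "mov dst, $tmp": the op is rewritten in place to copy
           from the TEMP_CONST temp returned by tcg_constant_internal(). */
        tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
    }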


@ -912,31 +912,41 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
} }
} }
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
tcg_target_long val) TCGReg ret, int64_t val)
{ {
uint32_t load_insn; uint32_t load_insn;
int rel, low; int rel, low;
intptr_t add; intptr_t add;
low = (int8_t)val; switch (vece) {
if (low >= -16 && low < 16) { case MO_8:
if (val == (tcg_target_long)dup_const(MO_8, low)) { low = (int8_t)val;
if (low >= -16 && low < 16) {
tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16)); tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
return; return;
} }
if (val == (tcg_target_long)dup_const(MO_16, low)) { if (have_isa_3_00) {
tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
return;
}
break;
case MO_16:
low = (int16_t)val;
if (low >= -16 && low < 16) {
tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16)); tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
return; return;
} }
if (val == (tcg_target_long)dup_const(MO_32, low)) { break;
case MO_32:
low = (int32_t)val;
if (low >= -16 && low < 16) {
tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16)); tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
return; return;
} }
} break;
if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) {
tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
return;
} }
/* /*
@ -956,14 +966,15 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
if (TCG_TARGET_REG_BITS == 64) { if (TCG_TARGET_REG_BITS == 64) {
new_pool_label(s, val, rel, s->code_ptr, add); new_pool_label(s, val, rel, s->code_ptr, add);
} else { } else {
new_pool_l2(s, rel, s->code_ptr, add, val, val); new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
} }
} else { } else {
load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
if (TCG_TARGET_REG_BITS == 64) { if (TCG_TARGET_REG_BITS == 64) {
new_pool_l2(s, rel, s->code_ptr, add, val, val); new_pool_l2(s, rel, s->code_ptr, add, val, val);
} else { } else {
new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); new_pool_l4(s, rel, s->code_ptr, add,
val >> 32, val, val >> 32, val);
} }
} }
@@ -987,12 +998,6 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
         tcg_out_movi_int(s, type, ret, arg, false);
         break;
 
-    case TCG_TYPE_V64:
-    case TCG_TYPE_V128:
-        tcg_debug_assert(ret >= TCG_REG_V0);
-        tcg_out_dupi_vec(s, type, ret, arg);
-        break;
-
     default:
         g_assert_not_reached();
     }

@@ -2972,8 +2977,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32:  /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();

@@ -3321,7 +3324,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         return;
 
     case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
-    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
     default:
         g_assert_not_reached();
@@ -3334,13 +3336,22 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                            TCGv_vec v1, TCGArg imm, TCGOpcode opci)
 {
-    TCGv_vec t1 = tcg_temp_new_vec(type);
+    TCGv_vec t1;
 
-    /* Splat w/bytes for xxspltib.  */
-    tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1));
+    if (vece == MO_32) {
+        /*
+         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
+         * So using negative numbers gets us the 4th bit easily.
+         */
+        imm = sextract32(imm, 0, 5);
+    } else {
+        imm &= (8 << vece) - 1;
+    }
+
+    /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
+    t1 = tcg_constant_vec(type, MO_8, imm);
     vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
               tcgv_vec_arg(v1), tcgv_vec_arg(t1));
-    tcg_temp_free_vec(t1);
 }
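Note: the sextract32() trick works because (per the comment) the hardware only consumes the low 5 bits of each byte of the count vector. A worked example of that assumption:

    int imm5 = sextract32(20, 0, 5);    /* 0b10100 sign-extends to -12 */
    /* (uint8_t)-12 == 0xf4, and the shift reads 0xf4 & 0x1f == 20,
     * so the splatted byte still encodes the intended count of 20. */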
 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,

@@ -3398,7 +3409,7 @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
 {
     TCGv_vec t1 = tcg_temp_new_vec(type);
     TCGv_vec t2 = tcg_temp_new_vec(type);
-    TCGv_vec t3, t4;
+    TCGv_vec c0, c16;
 
     switch (vece) {
     case MO_8:

@@ -3417,21 +3428,22 @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
     case MO_32:
         tcg_debug_assert(!have_isa_2_07);
-        t3 = tcg_temp_new_vec(type);
-        t4 = tcg_temp_new_vec(type);
-        tcg_gen_dupi_vec(MO_8, t4, -16);
+        /*
+         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
+         * So using -16 is a quick way to represent 16.
+         */
+        c16 = tcg_constant_vec(type, MO_8, -16);
+        c0 = tcg_constant_vec(type, MO_8, 0);
+
         vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
-                  tcgv_vec_arg(v2), tcgv_vec_arg(t4));
+                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
         vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                   tcgv_vec_arg(v1), tcgv_vec_arg(v2));
-        tcg_gen_dupi_vec(MO_8, t3, 0);
-        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3),
-                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
-        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3),
-                  tcgv_vec_arg(t3), tcgv_vec_arg(t4));
-        tcg_gen_add_vec(MO_32, v0, t2, t3);
-        tcg_temp_free_vec(t3);
-        tcg_temp_free_vec(t4);
+        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
+        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
+        tcg_gen_add_vec(MO_32, v0, t1, t2);
         break;
 
     default:
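Note: the same 5-bit property explains the -16 constant above; the rotate only reads the low 5 bits of each count byte, so -16 acts as a count of 16 (an assumption matching the in-code comment):

    int count = (uint8_t)-16 & 0x1f;    /* 0xf0 & 0x1f == 16 */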


@@ -1563,8 +1563,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         g_assert_not_reached();


@@ -2295,8 +2295,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();


@@ -1586,8 +1586,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();


@@ -115,7 +115,7 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
                         gen_helper_gvec_2 *fn)
 {
     TCGv_ptr a0, a1;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -127,7 +127,6 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
     tcg_temp_free_ptr(a0);
     tcg_temp_free_ptr(a1);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with two vector operands

@@ -137,7 +136,7 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
                          gen_helper_gvec_2i *fn)
 {
     TCGv_ptr a0, a1;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -149,7 +148,6 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
     tcg_temp_free_ptr(a0);
     tcg_temp_free_ptr(a1);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with three vector operands. */

@@ -158,7 +156,7 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         gen_helper_gvec_3 *fn)
 {
     TCGv_ptr a0, a1, a2;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -173,7 +171,6 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a0);
     tcg_temp_free_ptr(a1);
     tcg_temp_free_ptr(a2);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with four vector operands. */

@@ -182,7 +179,7 @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         int32_t data, gen_helper_gvec_4 *fn)
 {
     TCGv_ptr a0, a1, a2, a3;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -200,7 +197,6 @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a1);
     tcg_temp_free_ptr(a2);
     tcg_temp_free_ptr(a3);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with five vector operands. */

@@ -209,7 +205,7 @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
 {
     TCGv_ptr a0, a1, a2, a3, a4;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -230,7 +226,6 @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a2);
     tcg_temp_free_ptr(a3);
     tcg_temp_free_ptr(a4);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with three vector operands

@@ -240,7 +235,7 @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
                         int32_t data, gen_helper_gvec_2_ptr *fn)
 {
     TCGv_ptr a0, a1;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -252,7 +247,6 @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
     tcg_temp_free_ptr(a0);
     tcg_temp_free_ptr(a1);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with three vector operands

@@ -262,7 +256,7 @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         int32_t data, gen_helper_gvec_3_ptr *fn)
 {
     TCGv_ptr a0, a1, a2;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -277,7 +271,6 @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a0);
     tcg_temp_free_ptr(a1);
     tcg_temp_free_ptr(a2);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with four vector operands

@@ -288,7 +281,7 @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         gen_helper_gvec_4_ptr *fn)
 {
     TCGv_ptr a0, a1, a2, a3;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -306,7 +299,6 @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a1);
     tcg_temp_free_ptr(a2);
     tcg_temp_free_ptr(a3);
-    tcg_temp_free_i32(desc);
 }
 
 /* Generate a call to a gvec-style helper with five vector operands

@@ -317,7 +309,7 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         gen_helper_gvec_5_ptr *fn)
 {
     TCGv_ptr a0, a1, a2, a3, a4;
-    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+    TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
     a0 = tcg_temp_new_ptr();
     a1 = tcg_temp_new_ptr();

@@ -338,7 +330,6 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_ptr(a2);
     tcg_temp_free_ptr(a3);
     tcg_temp_free_ptr(a4);
-    tcg_temp_free_i32(desc);
 }
 
 /* Return true if we want to implement something of OPRSZ bytes
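Note: the hunks above (and most that follow) are one mechanical rewrite. A sketch of the before/after pattern, using the names from the diff:

    /* before: mutable temp, caller must free */
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
    /* ... pass desc to the out-of-line helper ... */
    tcg_temp_free_i32(desc);

    /* after: interned read-only constant, nothing to free */
    TCGv_i32 desc2 = tcg_constant_i32(simd_desc(oprsz, maxsz, data));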
@@ -605,9 +596,9 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
             || (TCG_TARGET_REG_BITS == 64
                 && (in_c == 0 || in_c == -1
                     || !check_size_impl(oprsz, 4)))) {
-            t_64 = tcg_const_i64(in_c);
+            t_64 = tcg_constant_i64(in_c);
         } else {
-            t_32 = tcg_const_i32(in_c);
+            t_32 = tcg_constant_i32(in_c);
         }
     }

@@ -648,11 +639,11 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
             t_val = tcg_temp_new_i32();
             tcg_gen_extrl_i64_i32(t_val, in_64);
         } else {
-            t_val = tcg_const_i32(in_c);
+            t_val = tcg_constant_i32(in_c);
         }
         gen_helper_memset(t_ptr, t_ptr, t_val, t_size);
 
-        if (!in_32) {
+        if (in_64) {
             tcg_temp_free_i32(t_val);
         }
         tcg_temp_free_ptr(t_size);

@@ -660,15 +651,14 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
         return;
     }
 
-    t_desc = tcg_const_i32(simd_desc(oprsz, maxsz, 0));
+    t_desc = tcg_constant_i32(simd_desc(oprsz, maxsz, 0));
 
     if (vece == MO_64) {
         if (in_64) {
             gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
         } else {
-            t_64 = tcg_const_i64(in_c);
+            t_64 = tcg_constant_i64(in_c);
             gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
-            tcg_temp_free_i64(t_64);
         }
     } else {
         typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);

@@ -680,24 +670,23 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
         if (in_32) {
             fns[vece](t_ptr, t_desc, in_32);
-        } else {
+        } else if (in_64) {
             t_32 = tcg_temp_new_i32();
-            if (in_64) {
-                tcg_gen_extrl_i64_i32(t_32, in_64);
-            } else if (vece == MO_8) {
-                tcg_gen_movi_i32(t_32, in_c & 0xff);
-            } else if (vece == MO_16) {
-                tcg_gen_movi_i32(t_32, in_c & 0xffff);
-            } else {
-                tcg_gen_movi_i32(t_32, in_c);
-            }
+            tcg_gen_extrl_i64_i32(t_32, in_64);
             fns[vece](t_ptr, t_desc, t_32);
             tcg_temp_free_i32(t_32);
+        } else {
+            if (vece == MO_8) {
+                in_c &= 0xff;
+            } else if (vece == MO_16) {
+                in_c &= 0xffff;
+            }
+            t_32 = tcg_constant_i32(in_c);
+            fns[vece](t_ptr, t_desc, t_32);
         }
     }
 
     tcg_temp_free_ptr(t_ptr);
-    tcg_temp_free_i32(t_desc);
     return;
 
 done:
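Note: the `!in_32` to `in_64` change restates ownership: t_val is only ours to free when we allocated it. A sketch of the three cases as they now stand (restating the code above, not new logic):

    if (in_32) {
        t_val = in_32;                    /* caller's temp: not freed here */
    } else if (in_64) {
        t_val = tcg_temp_new_i32();       /* ours: freed after the call */
        tcg_gen_extrl_i64_i32(t_val, in_64);
    } else {
        t_val = tcg_constant_i32(in_c);   /* interned constant: never freed */
    }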
@@ -1247,10 +1236,9 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
         if (g->fno) {
             tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
         } else {
-            TCGv_i64 tcg_c = tcg_const_i64(c);
+            TCGv_i64 tcg_c = tcg_constant_i64(c);
             tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
                                 maxsz, c, g->fnoi);
-            tcg_temp_free_i64(tcg_c);
         }
         oprsz = maxsz;
     }
@@ -1744,16 +1732,14 @@ static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
 
 void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
     gen_addv_mask(d, a, b, m);
-    tcg_temp_free_i64(m);
 }
 
 void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
     gen_addv_mask(d, a, b, m);
-    tcg_temp_free_i64(m);
 }
 
 void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)

@@ -1837,9 +1823,8 @@ void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_const_i64(c);
+    TCGv_i64 tmp = tcg_constant_i64(c);
     tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
-    tcg_temp_free_i64(tmp);
 }
 
 static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };

@@ -1897,16 +1882,14 @@ static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
 
 void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
     gen_subv_mask(d, a, b, m);
-    tcg_temp_free_i64(m);
 }
 
 void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
     gen_subv_mask(d, a, b, m);
-    tcg_temp_free_i64(m);
 }
 
 void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)

@@ -2017,9 +2000,8 @@ void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_const_i64(c);
+    TCGv_i64 tmp = tcg_constant_i64(c);
     tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
-    tcg_temp_free_i64(tmp);
 }
 
 void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
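Note: the masks passed to gen_addv_mask/gen_subv_mask are built by dup_const() at translation time; for illustration, the values the calls above intern:

    uint64_t m8  = dup_const(MO_8,  0x80);    /* 0x8080808080808080 */
    uint64_t m16 = dup_const(MO_16, 0x8000);  /* 0x8000800080008000 */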
@@ -2076,18 +2058,16 @@ void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
 
 static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 max = tcg_const_i32(-1);
+    TCGv_i32 max = tcg_constant_i32(-1);
     tcg_gen_add_i32(d, a, b);
     tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
-    tcg_temp_free_i32(max);
 }
 
 static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 max = tcg_const_i64(-1);
+    TCGv_i64 max = tcg_constant_i64(-1);
     tcg_gen_add_i64(d, a, b);
     tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
-    tcg_temp_free_i64(max);
 }
 
 void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,

@@ -2120,18 +2100,16 @@ void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
 
 static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 min = tcg_const_i32(0);
+    TCGv_i32 min = tcg_constant_i32(0);
     tcg_gen_sub_i32(d, a, b);
     tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
-    tcg_temp_free_i32(min);
 }
 
 static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 min = tcg_const_i64(0);
+    TCGv_i64 min = tcg_constant_i64(0);
     tcg_gen_sub_i64(d, a, b);
     tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
-    tcg_temp_free_i64(min);
 }
 
 void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2292,16 +2270,14 @@ static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
 
 void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
 {
-    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80));
     gen_negv_mask(d, b, m);
-    tcg_temp_free_i64(m);
 }
 
 void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
 {
-    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
+    TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
     gen_negv_mask(d, b, m);
-    tcg_temp_free_i64(m);
 }
 
 void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)

@@ -2570,9 +2546,8 @@ void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
+    TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
     tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
-    tcg_temp_free_i64(tmp);
 }
 
 static const GVecGen2s gop_xors = {
@@ -2595,9 +2570,8 @@ void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
+    TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
     tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
-    tcg_temp_free_i64(tmp);
 }
 
 static const GVecGen2s gop_ors = {

@@ -2620,9 +2594,8 @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
+    TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c));
     tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
-    tcg_temp_free_i64(tmp);
 }
 
 void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
@@ -3110,9 +3083,9 @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
 
-    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
-    tcg_gen_and_vec(vece, t, t, b);
+    tcg_gen_and_vec(vece, t, b, m);
     tcg_gen_shlv_vec(vece, d, a, t);
     tcg_temp_free_vec(t);
 }

@@ -3173,9 +3146,9 @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
 
-    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
-    tcg_gen_and_vec(vece, t, t, b);
+    tcg_gen_and_vec(vece, t, b, m);
     tcg_gen_shrv_vec(vece, d, a, t);
     tcg_temp_free_vec(t);
 }

@@ -3236,9 +3209,9 @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
 
-    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
-    tcg_gen_and_vec(vece, t, t, b);
+    tcg_gen_and_vec(vece, t, b, m);
     tcg_gen_sarv_vec(vece, d, a, t);
     tcg_temp_free_vec(t);
 }

@@ -3299,9 +3272,9 @@ static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
                                   TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
 
-    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
-    tcg_gen_and_vec(vece, t, t, b);
+    tcg_gen_and_vec(vece, t, b, m);
     tcg_gen_rotlv_vec(vece, d, a, t);
     tcg_temp_free_vec(t);
 }

@@ -3358,9 +3331,9 @@ static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
                                   TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec t = tcg_temp_new_vec_matching(d);
+    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
 
-    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
-    tcg_gen_and_vec(vece, t, t, b);
+    tcg_gen_and_vec(vece, t, b, m);
     tcg_gen_rotrv_vec(vece, d, a, t);
     tcg_temp_free_vec(t);
 }
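Note: all five *_mod_vec helpers share this shape; (8 << vece) - 1 is the per-lane count mask, now materialized once as a matching constant vector that needs no free:

    /* MO_8 -> 7, MO_16 -> 15, MO_32 -> 31, MO_64 -> 63 */
    TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, b, m);    /* m is interned; never freed */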


@@ -83,7 +83,6 @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list,
         case INDEX_op_xor_vec:
         case INDEX_op_mov_vec:
         case INDEX_op_dup_vec:
-        case INDEX_op_dupi_vec:
         case INDEX_op_dup2_vec:
         case INDEX_op_ld_vec:
         case INDEX_op_st_vec:

@@ -216,25 +215,17 @@ void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a)
     }
 }
 
-#define MO_REG  (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32)
-
-static void do_dupi_vec(TCGv_vec r, unsigned vece, TCGArg a)
-{
-    TCGTemp *rt = tcgv_vec_temp(r);
-    vec_gen_2(INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a);
-}
-
 TCGv_vec tcg_const_zeros_vec(TCGType type)
 {
     TCGv_vec ret = tcg_temp_new_vec(type);
-    do_dupi_vec(ret, MO_REG, 0);
+    tcg_gen_dupi_vec(MO_64, ret, 0);
     return ret;
 }
 
 TCGv_vec tcg_const_ones_vec(TCGType type)
 {
     TCGv_vec ret = tcg_temp_new_vec(type);
-    do_dupi_vec(ret, MO_REG, -1);
+    tcg_gen_dupi_vec(MO_64, ret, -1);
     return ret;
 }

@@ -250,41 +241,10 @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
     return tcg_const_ones_vec(t->base_type);
 }
 
-void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a)
-{
-    if (TCG_TARGET_REG_BITS == 64) {
-        do_dupi_vec(r, MO_64, a);
-    } else if (a == dup_const(MO_32, a)) {
-        do_dupi_vec(r, MO_32, a);
-    } else {
-        TCGv_i64 c = tcg_const_i64(a);
-        tcg_gen_dup_i64_vec(MO_64, r, c);
-        tcg_temp_free_i64(c);
-    }
-}
-
-void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a)
-{
-    do_dupi_vec(r, MO_REG, dup_const(MO_32, a));
-}
-
-void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a)
-{
-    do_dupi_vec(r, MO_REG, dup_const(MO_16, a));
-}
-
-void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a)
-{
-    do_dupi_vec(r, MO_REG, dup_const(MO_8, a));
-}
-
 void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
 {
-    if (vece == MO_64) {
-        tcg_gen_dup64i_vec(r, a);
-    } else {
-        do_dupi_vec(r, MO_REG, dup_const(vece, a));
-    }
+    TCGTemp *rt = tcgv_vec_temp(r);
+    tcg_gen_mov_vec(r, tcg_constant_vec(rt->base_type, vece, a));
 }
 
 void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)

@@ -489,8 +449,8 @@ void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
         if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
             tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
         } else {
-            do_dupi_vec(t, MO_REG, 0);
-            tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t);
+            tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a,
+                            tcg_constant_vec(type, vece, 0));
         }
         tcg_gen_xor_vec(vece, r, a, t);
         tcg_gen_sub_vec(vece, r, r, t);
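Note: with the dup{8,16,32,64}i wrappers removed, every immediate splat funnels through the constant table. Roughly:

    tcg_gen_dupi_vec(MO_8, r, 0x80);
    /* is now equivalent to: */
    tcg_gen_mov_vec(r, tcg_constant_vec(tcgv_vec_temp(r)->base_type,
                                        MO_8, 0x80));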


@@ -104,15 +104,18 @@ void tcg_gen_mb(TCGBar mb_type)
 
 /* 32 bit ops */
 
+void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
+{
+    tcg_gen_mov_i32(ret, tcg_constant_i32(arg));
+}
+
 void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_add_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_add_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }
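Note: tcg_gen_movi_i32() is now a thin wrapper, so a movi becomes a mov whose source is the interned constant temp. A sketch of the resulting opcode stream:

    tcg_gen_movi_i32(r, 0x1234);
    /* emits: mov_i32 r, $0x1234   -- no separate movi_i32 opcode */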
@@ -122,9 +125,7 @@ void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
         /* Don't recurse with tcg_gen_neg_i32.  */
         tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg1);
-        tcg_gen_sub_i32(ret, t0, arg2);
-        tcg_temp_free_i32(t0);
+        tcg_gen_sub_i32(ret, tcg_constant_i32(arg1), arg2);
     }
 }

@@ -134,15 +135,12 @@ void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_sub_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_sub_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }
 
 void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
-    TCGv_i32 t0;
-
     /* Some cases can be optimized here.  */
     switch (arg2) {
     case 0:

@@ -165,9 +163,8 @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
         }
         break;
     }
-    t0 = tcg_const_i32(arg2);
-    tcg_gen_and_i32(ret, arg1, t0);
-    tcg_temp_free_i32(t0);
+
+    tcg_gen_and_i32(ret, arg1, tcg_constant_i32(arg2));
 }
 
 void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

@@ -178,9 +175,7 @@ void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     } else if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_or_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_or_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }

@@ -193,9 +188,7 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
         /* Don't recurse with tcg_gen_not_i32.  */
         tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_xor_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }

@@ -205,9 +198,7 @@ void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_shl_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_shl_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }

@@ -217,9 +208,7 @@ void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_shr_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_shr_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }

@@ -229,9 +218,7 @@ void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_sar_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_sar_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }
@@ -250,9 +237,7 @@ void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
     if (cond == TCG_COND_ALWAYS) {
         tcg_gen_br(l);
     } else if (cond != TCG_COND_NEVER) {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_brcond_i32(cond, arg1, t0, l);
-        tcg_temp_free_i32(t0);
+        tcg_gen_brcond_i32(cond, arg1, tcg_constant_i32(arg2), l);
     }
 }

@@ -271,9 +256,7 @@ void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
 void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                           TCGv_i32 arg1, int32_t arg2)
 {
-    TCGv_i32 t0 = tcg_const_i32(arg2);
-    tcg_gen_setcond_i32(cond, ret, arg1, t0);
-    tcg_temp_free_i32(t0);
+    tcg_gen_setcond_i32(cond, ret, arg1, tcg_constant_i32(arg2));
 }
 
 void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

@@ -283,9 +266,7 @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     } else if (is_power_of_2(arg2)) {
         tcg_gen_shli_i32(ret, arg1, ctz32(arg2));
     } else {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_mul_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_mul_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }

@@ -433,9 +414,7 @@ void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
 {
-    TCGv_i32 t = tcg_const_i32(arg2);
-    tcg_gen_clz_i32(ret, arg1, t);
-    tcg_temp_free_i32(t);
+    tcg_gen_clz_i32(ret, arg1, tcg_constant_i32(arg2));
 }
 
 void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

@@ -468,10 +447,9 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
             tcg_gen_clzi_i32(t, t, 32);
             tcg_gen_xori_i32(t, t, 31);
         }
-        z = tcg_const_i32(0);
+        z = tcg_constant_i32(0);
         tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
         tcg_temp_free_i32(t);
-        tcg_temp_free_i32(z);
     } else {
         gen_helper_ctz_i32(ret, arg1, arg2);
     }

@@ -487,9 +465,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
         tcg_gen_ctpop_i32(ret, t);
         tcg_temp_free_i32(t);
     } else {
-        TCGv_i32 t = tcg_const_i32(arg2);
-        tcg_gen_ctz_i32(ret, arg1, t);
-        tcg_temp_free_i32(t);
+        tcg_gen_ctz_i32(ret, arg1, tcg_constant_i32(arg2));
     }
 }

@@ -547,9 +523,7 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else if (TCG_TARGET_HAS_rot_i32) {
-        TCGv_i32 t0 = tcg_const_i32(arg2);
-        tcg_gen_rotl_i32(ret, arg1, t0);
-        tcg_temp_free_i32(t0);
+        tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2));
     } else {
         TCGv_i32 t0, t1;
         t0 = tcg_temp_new_i32();

@@ -653,9 +627,8 @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
         tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
     } else if (TCG_TARGET_HAS_deposit_i32
                && TCG_TARGET_deposit_i32_valid(ofs, len)) {
-        TCGv_i32 zero = tcg_const_i32(0);
+        TCGv_i32 zero = tcg_constant_i32(0);
         tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
-        tcg_temp_free_i32(zero);
     } else {
         /* To help two-operand hosts we prefer to zero-extend first,
            which allows ARG to stay live.  */

@@ -1052,7 +1025,7 @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
     } else {
         TCGv_i32 t0 = tcg_temp_new_i32();
         TCGv_i32 t1 = tcg_temp_new_i32();
-        TCGv_i32 t2 = tcg_const_i32(0x00ff00ff);
+        TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff);
 
                                         /* arg = abcd */
         tcg_gen_shri_i32(t0, arg, 8);   /*  t0 = .abc */

@@ -1067,7 +1040,6 @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
 
         tcg_temp_free_i32(t0);
         tcg_temp_free_i32(t1);
-        tcg_temp_free_i32(t2);
     }
 }
@@ -1114,8 +1086,15 @@ void tcg_gen_discard_i64(TCGv_i64 arg)
 
 void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
+    TCGTemp *ts = tcgv_i64_temp(arg);
+
+    /* Canonicalize TCGv_i64 TEMP_CONST into TCGv_i32 TEMP_CONST. */
+    if (ts->kind == TEMP_CONST) {
+        tcg_gen_movi_i64(ret, ts->val);
+    } else {
+        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
+    }
 }
 
 void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)

@@ -1237,6 +1216,14 @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     tcg_temp_free_i64(t0);
     tcg_temp_free_i32(t1);
 }
+
+#else
+
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
+{
+    tcg_gen_mov_i64(ret, tcg_constant_i64(arg));
+}
+
 #endif /* TCG_TARGET_REG_SIZE == 32 */
 
 void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
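Note (an assumption drawn from the 32-bit path above): on a 32-bit host a constant TCGv_i64 is a pair of 32-bit TEMP_CONST halves, so tcg_gen_mov_i64() re-issues movi_i64, which on such hosts amounts to roughly:

    tcg_gen_movi_i32(TCGV_LOW(ret), (int32_t)ts->val);
    tcg_gen_movi_i32(TCGV_HIGH(ret), ts->val >> 32);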
@@ -1244,10 +1231,12 @@ void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
+    } else if (TCG_TARGET_REG_BITS == 64) {
+        tcg_gen_add_i64(ret, arg1, tcg_constant_i64(arg2));
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_add_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret),
+                         TCGV_LOW(arg1), TCGV_HIGH(arg1),
+                         tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32));
     }
 }

@@ -1256,10 +1245,12 @@ void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
     if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
         /* Don't recurse with tcg_gen_neg_i64.  */
         tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
+    } else if (TCG_TARGET_REG_BITS == 64) {
+        tcg_gen_sub_i64(ret, tcg_constant_i64(arg1), arg2);
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg1);
-        tcg_gen_sub_i64(ret, t0, arg2);
-        tcg_temp_free_i64(t0);
+        tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret),
+                         tcg_constant_i32(arg1), tcg_constant_i32(arg1 >> 32),
+                         TCGV_LOW(arg2), TCGV_HIGH(arg2));
     }
 }

@@ -1268,17 +1259,17 @@ void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
+    } else if (TCG_TARGET_REG_BITS == 64) {
+        tcg_gen_sub_i64(ret, arg1, tcg_constant_i64(arg2));
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_sub_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret),
+                         TCGV_LOW(arg1), TCGV_HIGH(arg1),
+                         tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32));
     }
 }
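Note: on 32-bit hosts the 64-bit immediate is split into two interned 32-bit constants; e.g. for arg2 = 0x100000001 both halves happen to intern as the same $0x1 temp:

    tcg_constant_i32(arg2);          /* low half:  $0x1 */
    tcg_constant_i32(arg2 >> 32);    /* high half: $0x1 */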
 void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
 {
-    TCGv_i64 t0;
-
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
         tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);

@@ -1313,9 +1304,8 @@ void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
         }
         break;
     }
-    t0 = tcg_const_i64(arg2);
-    tcg_gen_and_i64(ret, arg1, t0);
-    tcg_temp_free_i64(t0);
+
+    tcg_gen_and_i64(ret, arg1, tcg_constant_i64(arg2));
 }

@@ -1331,9 +1321,7 @@ void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     } else if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_or_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_or_i64(ret, arg1, tcg_constant_i64(arg2));
     }
 }

@@ -1351,9 +1339,7 @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
         /* Don't recurse with tcg_gen_not_i64.  */
         tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_xor_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2));
     }
 }

@@ -1415,9 +1401,7 @@ void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     } else if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_shl_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_shl_i64(ret, arg1, tcg_constant_i64(arg2));
     }
 }

@@ -1429,9 +1413,7 @@ void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     } else if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_shr_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_shr_i64(ret, arg1, tcg_constant_i64(arg2));
     }
 }

@@ -1443,9 +1425,7 @@ void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     } else if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
     } else {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_sar_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_sar_i64(ret, arg1, tcg_constant_i64(arg2));
     }
 }
@@ -1468,12 +1448,17 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
 void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
 {
-    if (cond == TCG_COND_ALWAYS) {
+    if (TCG_TARGET_REG_BITS == 64) {
+        tcg_gen_brcond_i64(cond, arg1, tcg_constant_i64(arg2), l);
+    } else if (cond == TCG_COND_ALWAYS) {
         tcg_gen_br(l);
     } else if (cond != TCG_COND_NEVER) {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_brcond_i64(cond, arg1, t0, l);
-        tcg_temp_free_i64(t0);
+        l->refs++;
+        tcg_gen_op6ii_i32(INDEX_op_brcond2_i32,
+                          TCGV_LOW(arg1), TCGV_HIGH(arg1),
+                          tcg_constant_i32(arg2),
+                          tcg_constant_i32(arg2 >> 32),
+                          cond, label_arg(l));
     }
 }

@@ -1499,9 +1484,19 @@ void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
 void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
                           TCGv_i64 arg1, int64_t arg2)
 {
-    TCGv_i64 t0 = tcg_const_i64(arg2);
-    tcg_gen_setcond_i64(cond, ret, arg1, t0);
-    tcg_temp_free_i64(t0);
+    if (TCG_TARGET_REG_BITS == 64) {
+        tcg_gen_setcond_i64(cond, ret, arg1, tcg_constant_i64(arg2));
+    } else if (cond == TCG_COND_ALWAYS) {
+        tcg_gen_movi_i64(ret, 1);
+    } else if (cond == TCG_COND_NEVER) {
+        tcg_gen_movi_i64(ret, 0);
+    } else {
+        tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
+                         TCGV_LOW(arg1), TCGV_HIGH(arg1),
+                         tcg_constant_i32(arg2),
+                         tcg_constant_i32(arg2 >> 32), cond);
+        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+    }
 }
 
 void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
@@ -1690,7 +1685,7 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_const_i64(0x00ff00ff);
+        TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff);
 
                                         /* arg = ....abcd */
         tcg_gen_shri_i64(t0, arg, 8);   /*  t0 = .....abc */

@@ -1706,7 +1701,6 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
 
         tcg_temp_free_i64(t0);
         tcg_temp_free_i64(t1);
-        tcg_temp_free_i64(t2);
     }
 }

@@ -1850,16 +1844,16 @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
     if (TCG_TARGET_REG_BITS == 32
         && TCG_TARGET_HAS_clz_i32
         && arg2 <= 0xffffffffu) {
-        TCGv_i32 t = tcg_const_i32((uint32_t)arg2 - 32);
-        tcg_gen_clz_i32(t, TCGV_LOW(arg1), t);
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32);
         tcg_gen_addi_i32(t, t, 32);
         tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
         tcg_temp_free_i32(t);
     } else {
-        TCGv_i64 t = tcg_const_i64(arg2);
-        tcg_gen_clz_i64(ret, arg1, t);
-        tcg_temp_free_i64(t);
+        TCGv_i64 t0 = tcg_const_i64(arg2);
+        tcg_gen_clz_i64(ret, arg1, t0);
+        tcg_temp_free_i64(t0);
     }
 }

@@ -1881,7 +1875,7 @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
             tcg_gen_clzi_i64(t, t, 64);
             tcg_gen_xori_i64(t, t, 63);
         }
-        z = tcg_const_i64(0);
+        z = tcg_constant_i64(0);
         tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
         tcg_temp_free_i64(t);
         tcg_temp_free_i64(z);

@@ -1895,8 +1889,8 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
     if (TCG_TARGET_REG_BITS == 32
         && TCG_TARGET_HAS_ctz_i32
         && arg2 <= 0xffffffffu) {
-        TCGv_i32 t32 = tcg_const_i32((uint32_t)arg2 - 32);
-        tcg_gen_ctz_i32(t32, TCGV_HIGH(arg1), t32);
+        TCGv_i32 t32 = tcg_temp_new_i32();
+        tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32);
         tcg_gen_addi_i32(t32, t32, 32);
         tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);

@@ -1911,9 +1905,9 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
         tcg_gen_ctpop_i64(ret, t);
         tcg_temp_free_i64(t);
     } else {
-        TCGv_i64 t64 = tcg_const_i64(arg2);
-        tcg_gen_ctz_i64(ret, arg1, t64);
-        tcg_temp_free_i64(t64);
+        TCGv_i64 t0 = tcg_const_i64(arg2);
+        tcg_gen_ctz_i64(ret, arg1, t0);
+        tcg_temp_free_i64(t0);
     }
 }

@@ -1969,9 +1963,7 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
     } else if (TCG_TARGET_HAS_rot_i64) {
-        TCGv_i64 t0 = tcg_const_i64(arg2);
-        tcg_gen_rotl_i64(ret, arg1, t0);
-        tcg_temp_free_i64(t0);
+        tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2));
     } else {
         TCGv_i64 t0, t1;
         t0 = tcg_temp_new_i64();

@@ -2089,9 +2081,8 @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
         tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
     } else if (TCG_TARGET_HAS_deposit_i64
                && TCG_TARGET_deposit_i64_valid(ofs, len)) {
-        TCGv_i64 zero = tcg_const_i64(0);
+        TCGv_i64 zero = tcg_constant_i64(0);
         tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
-        tcg_temp_free_i64(zero);
     } else {
         if (TCG_TARGET_REG_BITS == 32) {
             if (ofs >= 32) {

@@ -3117,9 +3108,8 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
 
 #ifdef CONFIG_SOFTMMU
         {
-            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
-            gen(retv, cpu_env, addr, cmpv, newv, oi);
-            tcg_temp_free_i32(oi);
+            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
+            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
         }
 #else
         gen(retv, cpu_env, addr, cmpv, newv);

@@ -3162,9 +3152,8 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
 
 #ifdef CONFIG_SOFTMMU
         {
-            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
-            gen(retv, cpu_env, addr, cmpv, newv, oi);
-            tcg_temp_free_i32(oi);
+            TCGMemOpIdx oi = make_memop_idx(memop, idx);
+            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
         }
 #else
         gen(retv, cpu_env, addr, cmpv, newv);

@@ -3226,9 +3215,8 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
 
 #ifdef CONFIG_SOFTMMU
         {
-            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
-            gen(ret, cpu_env, addr, val, oi);
-            tcg_temp_free_i32(oi);
+            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
+            gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
         }
 #else
         gen(ret, cpu_env, addr, val);

@@ -3272,9 +3260,8 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
 
 #ifdef CONFIG_SOFTMMU
         {
-            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
-            gen(ret, cpu_env, addr, val, oi);
-            tcg_temp_free_i32(oi);
+            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
+            gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
        }
 #else
         gen(ret, cpu_env, addr, val);
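Note: the memop/mmu-index descriptor is a host compile-time value, so it is now computed first and interned once; usage sketch of the pattern shared by all four atomic paths:

    TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
    gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));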

tcg/tcg.c

@@ -117,8 +117,8 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg src);
 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                              TCGReg dst, TCGReg base, intptr_t offset);
-static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
-                             TCGReg dst, tcg_target_long arg);
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg dst, int64_t arg);
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                            unsigned vece, const TCGArg *args,
                            const int *const_args);

@@ -133,8 +133,8 @@
 {
     g_assert_not_reached();
 }
-static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type,
-                                    TCGReg dst, tcg_target_long arg)
+static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                                    TCGReg dst, int64_t arg)
 {
     g_assert_not_reached();
 }

@@ -1184,6 +1184,13 @@ void tcg_func_start(TCGContext *s)
     /* No temps have been previously allocated for size or locality.  */
     memset(s->free_temps, 0, sizeof(s->free_temps));
 
+    /* No constant temps have been previously allocated. */
+    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
+        if (s->const_table[i]) {
+            g_hash_table_remove_all(s->const_table[i]);
+        }
+    }
+
     s->nb_ops = 0;
     s->nb_labels = 0;
     s->current_frame_offset = s->frame_start;
@@ -1211,7 +1218,7 @@ static inline TCGTemp *tcg_global_alloc(TCGContext *s)
     tcg_debug_assert(s->nb_globals == s->nb_temps);
     s->nb_globals++;
     ts = tcg_temp_alloc(s);
-    ts->temp_global = 1;
+    ts->kind = TEMP_GLOBAL;
 
     return ts;
 }

@@ -1228,7 +1235,7 @@ static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
     ts = tcg_global_alloc(s);
     ts->base_type = type;
     ts->type = type;
-    ts->fixed_reg = 1;
+    ts->kind = TEMP_FIXED;
     ts->reg = reg;
     ts->name = name;
     tcg_regset_set_reg(s->reserved_regs, reg);

@@ -1255,13 +1262,19 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
     bigendian = 1;
 #endif
 
-    if (!base_ts->fixed_reg) {
+    switch (base_ts->kind) {
+    case TEMP_FIXED:
+        break;
+    case TEMP_GLOBAL:
         /* We do not support double-indirect registers.  */
         tcg_debug_assert(!base_ts->indirect_reg);
         base_ts->indirect_base = 1;
         s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                             ? 2 : 1);
         indirect_reg = 1;
+        break;
+    default:
+        g_assert_not_reached();
     }
 
     if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {

@@ -1303,6 +1316,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
 TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
 {
     TCGContext *s = tcg_ctx;
+    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
     TCGTemp *ts;
     int idx, k;

@@ -1315,7 +1329,7 @@ TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
         ts = &s->temps[idx];
         ts->temp_allocated = 1;
         tcg_debug_assert(ts->base_type == type);
-        tcg_debug_assert(ts->temp_local == temp_local);
+        tcg_debug_assert(ts->kind == kind);
     } else {
         ts = tcg_temp_alloc(s);
         if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {

@@ -1324,18 +1338,18 @@ TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
             ts->base_type = type;
             ts->type = TCG_TYPE_I32;
             ts->temp_allocated = 1;
-            ts->temp_local = temp_local;
+            ts->kind = kind;
 
             tcg_debug_assert(ts2 == ts + 1);
             ts2->base_type = TCG_TYPE_I64;
             ts2->type = TCG_TYPE_I32;
             ts2->temp_allocated = 1;
-            ts2->temp_local = temp_local;
+            ts2->kind = kind;
         } else {
             ts->base_type = type;
             ts->type = type;
             ts->temp_allocated = 1;
-            ts->temp_local = temp_local;
+            ts->kind = kind;
         }
     }
@@ -1385,6 +1399,11 @@ void tcg_temp_free_internal(TCGTemp *ts)
     TCGContext *s = tcg_ctx;
     int k, idx;
 
+    /* In order to simplify users of tcg_constant_*, silently ignore free. */
+    if (ts->kind == TEMP_CONST) {
+        return;
+    }
+
 #if defined(CONFIG_DEBUG_TCG)
     s->temps_in_use--;
     if (s->temps_in_use < 0) {
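Note: the early return makes freeing a constant a deliberate no-op, which is what lets callers like tcg_gen_ctz_i64 above keep their existing cleanup paths unchanged:

    TCGv_i32 c = tcg_constant_i32(42);
    tcg_temp_free_i32(c);    /* silently ignored: TEMP_CONST is never freed */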
@ -1392,15 +1411,77 @@ void tcg_temp_free_internal(TCGTemp *ts)
} }
#endif #endif
tcg_debug_assert(ts->temp_global == 0); tcg_debug_assert(ts->kind < TEMP_GLOBAL);
tcg_debug_assert(ts->temp_allocated != 0); tcg_debug_assert(ts->temp_allocated != 0);
ts->temp_allocated = 0; ts->temp_allocated = 0;
idx = temp_idx(ts); idx = temp_idx(ts);
k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0); k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
set_bit(idx, s->free_temps[k].l); set_bit(idx, s->free_temps[k].l);
} }
+TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
+{
+    TCGContext *s = tcg_ctx;
+    GHashTable *h = s->const_table[type];
+    TCGTemp *ts;
+
+    if (h == NULL) {
+        h = g_hash_table_new(g_int64_hash, g_int64_equal);
+        s->const_table[type] = h;
+    }
+
+    ts = g_hash_table_lookup(h, &val);
+    if (ts == NULL) {
+        ts = tcg_temp_alloc(s);
+
+        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
+            TCGTemp *ts2 = tcg_temp_alloc(s);
+
+            ts->base_type = TCG_TYPE_I64;
+            ts->type = TCG_TYPE_I32;
+            ts->kind = TEMP_CONST;
+            ts->temp_allocated = 1;
+            /*
+             * Retain the full value of the 64-bit constant in the low
+             * part, so that the hash table works.  Actual uses will
+             * truncate the value to the low part.
+             */
+            ts->val = val;
+
+            tcg_debug_assert(ts2 == ts + 1);
+            ts2->base_type = TCG_TYPE_I64;
+            ts2->type = TCG_TYPE_I32;
+            ts2->kind = TEMP_CONST;
+            ts2->temp_allocated = 1;
+            ts2->val = val >> 32;
+        } else {
+            ts->base_type = type;
+            ts->type = type;
+            ts->kind = TEMP_CONST;
+            ts->temp_allocated = 1;
+            ts->val = val;
+        }
+        g_hash_table_insert(h, &ts->val, ts);
+    }
+
+    return ts;
+}
+
+TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
+{
+    val = dup_const(vece, val);
+    return temp_tcgv_vec(tcg_constant_internal(type, val));
+}
+
+TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
+{
+    TCGTemp *t = tcgv_vec_temp(match);
+
+    tcg_debug_assert(t->temp_allocated != 0);
+    return tcg_constant_vec(t->base_type, vece, val);
+}
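
The interning scheme above keys the GHashTable on the address of the int64 stored inside the temp itself (&ts->val), so one allocation provides both the key storage and the value. A minimal, self-contained sketch of the same pattern, using only GLib; Const64 and intern_const are illustrative names, not QEMU API:

    /* Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct Const64 {
        int64_t val;                 /* the hash key lives inside the object */
    } Const64;

    static GHashTable *table;

    static Const64 *intern_const(int64_t val)
    {
        Const64 *c;

        if (table == NULL) {
            table = g_hash_table_new(g_int64_hash, g_int64_equal);
        }
        c = g_hash_table_lookup(table, &val);
        if (c == NULL) {
            c = g_new0(Const64, 1);
            c->val = val;
            /* Insert with the key pointing at the value inside the
               object, just as tcg.c inserts &ts->val. */
            g_hash_table_insert(table, &c->val, c);
        }
        return c;
    }

    int main(void)
    {
        Const64 *a = intern_const(42);
        Const64 *b = intern_const(42);
        printf("deduplicated: %s\n", a == b ? "yes" : "no");  /* yes */
        return 0;
    }

Because a lookup for an existing value returns the original object, repeated tcg_constant_* calls for the same constant cost one hash probe and no allocation, which is also why tcg_temp_free_internal can silently ignore TEMP_CONST temps.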
 TCGv_i32 tcg_const_i32(int32_t val)
 {
     TCGv_i32 t0;
@@ -1483,7 +1564,6 @@ bool tcg_op_supported(TCGOpcode op)
         return TCG_TARGET_HAS_goto_ptr;
     case INDEX_op_mov_i32:
-    case INDEX_op_movi_i32:
     case INDEX_op_setcond_i32:
     case INDEX_op_brcond_i32:
     case INDEX_op_ld8u_i32:
@@ -1577,7 +1657,6 @@ bool tcg_op_supported(TCGOpcode op)
         return TCG_TARGET_REG_BITS == 32;
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i64:
     case INDEX_op_setcond_i64:
     case INDEX_op_brcond_i64:
     case INDEX_op_ld8u_i64:
@@ -1683,7 +1762,6 @@ bool tcg_op_supported(TCGOpcode op)
     case INDEX_op_mov_vec:
     case INDEX_op_dup_vec:
-    case INDEX_op_dupi_vec:
     case INDEX_op_dupm_vec:
     case INDEX_op_ld_vec:
     case INDEX_op_st_vec:
@@ -1930,17 +2008,30 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
 static void tcg_reg_alloc_start(TCGContext *s)
 {
     int i, n;
-    TCGTemp *ts;

-    for (i = 0, n = s->nb_globals; i < n; i++) {
-        ts = &s->temps[i];
-        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
-    }
-    for (n = s->nb_temps; i < n; i++) {
-        ts = &s->temps[i];
-        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
-        ts->mem_allocated = 0;
-        ts->fixed_reg = 0;
+    for (i = 0, n = s->nb_temps; i < n; i++) {
+        TCGTemp *ts = &s->temps[i];
+        TCGTempVal val = TEMP_VAL_MEM;
+
+        switch (ts->kind) {
+        case TEMP_CONST:
+            val = TEMP_VAL_CONST;
+            break;
+        case TEMP_FIXED:
+            val = TEMP_VAL_REG;
+            break;
+        case TEMP_GLOBAL:
+            break;
+        case TEMP_NORMAL:
+            val = TEMP_VAL_DEAD;
+            /* fall through */
+        case TEMP_LOCAL:
+            ts->mem_allocated = 0;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        ts->val_type = val;
     }

     memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
@@ -1951,12 +2042,37 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
 {
     int idx = temp_idx(ts);

-    if (ts->temp_global) {
+    switch (ts->kind) {
+    case TEMP_FIXED:
+    case TEMP_GLOBAL:
         pstrcpy(buf, buf_size, ts->name);
-    } else if (ts->temp_local) {
+        break;
+    case TEMP_LOCAL:
         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
-    } else {
+        break;
+    case TEMP_NORMAL:
         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
+        break;
+    case TEMP_CONST:
+        switch (ts->type) {
+        case TCG_TYPE_I32:
+            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
+            break;
+#if TCG_TARGET_REG_BITS > 32
+        case TCG_TYPE_I64:
+            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
+            break;
+#endif
+        case TCG_TYPE_V64:
+        case TCG_TYPE_V128:
+        case TCG_TYPE_V256:
+            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
+                     64 << (ts->type - TCG_TYPE_V64), ts->val);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
     }
     return buf;
 }
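
In dumps, a TEMP_CONST temp therefore prints as its value rather than as a name, with vector constants prefixed by their width in bits, computed as 64 << (type - TCG_TYPE_V64). A standalone sketch of just that formatting, with the TCGType values mocked up for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef enum {
        TCG_TYPE_I32, TCG_TYPE_I64,
        TCG_TYPE_V64, TCG_TYPE_V128, TCG_TYPE_V256,
    } TCGType;

    static void const_name(char *buf, size_t n, TCGType type, int64_t val)
    {
        switch (type) {
        case TCG_TYPE_I32:
            snprintf(buf, n, "$0x%x", (int32_t)val);
            break;
        case TCG_TYPE_I64:
            snprintf(buf, n, "$0x%" PRIx64, val);
            break;
        default:
            /* Vector constant: prefix with the vector width in bits. */
            snprintf(buf, n, "v%d$0x%" PRIx64,
                     64 << (type - TCG_TYPE_V64), val);
            break;
        }
    }

    int main(void)
    {
        char buf[64];
        const_name(buf, sizeof(buf), TCG_TYPE_V128, 0x0101010101010101ll);
        puts(buf);   /* v128$0x101010101010101 */
        return 0;
    }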
@@ -2547,15 +2663,25 @@ static void la_bb_end(TCGContext *s, int ng, int nt)
 {
     int i;

-    for (i = 0; i < ng; ++i) {
-        s->temps[i].state = TS_DEAD | TS_MEM;
-        la_reset_pref(&s->temps[i]);
-    }
-    for (i = ng; i < nt; ++i) {
-        s->temps[i].state = (s->temps[i].temp_local
-                             ? TS_DEAD | TS_MEM
-                             : TS_DEAD);
-        la_reset_pref(&s->temps[i]);
+    for (i = 0; i < nt; ++i) {
+        TCGTemp *ts = &s->temps[i];
+        int state;
+
+        switch (ts->kind) {
+        case TEMP_FIXED:
+        case TEMP_GLOBAL:
+        case TEMP_LOCAL:
+            state = TS_DEAD | TS_MEM;
+            break;
+        case TEMP_NORMAL:
+        case TEMP_CONST:
+            state = TS_DEAD;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        ts->state = state;
+        la_reset_pref(ts);
     }
 }
@@ -2583,14 +2709,24 @@ static void la_bb_sync(TCGContext *s, int ng, int nt)
     la_global_sync(s, ng);

     for (int i = ng; i < nt; ++i) {
-        if (s->temps[i].temp_local) {
-            int state = s->temps[i].state;
-            s->temps[i].state = state | TS_MEM;
+        TCGTemp *ts = &s->temps[i];
+        int state;
+
+        switch (ts->kind) {
+        case TEMP_LOCAL:
+            state = ts->state;
+            ts->state = state | TS_MEM;
             if (state != TS_DEAD) {
                 continue;
             }
-        } else {
+            break;
+        case TEMP_NORMAL:
             s->temps[i].state = TS_DEAD;
+            break;
+        case TEMP_CONST:
+            continue;
+        default:
+            g_assert_not_reached();
         }
         la_reset_pref(&s->temps[i]);
     }
@@ -3151,7 +3287,7 @@ static void dump_regs(TCGContext *s)
                    tcg_target_reg_names[ts->mem_base->reg]);
             break;
         case TEMP_VAL_CONST:
-            printf("$0x%" TCG_PRIlx, ts->val);
+            printf("$0x%" PRIx64, ts->val);
             break;
         case TEMP_VAL_DEAD:
             printf("D");
@@ -3191,7 +3327,8 @@ static void check_regs(TCGContext *s)
         }
     }
     for (k = 0; k < s->nb_temps; k++) {
         ts = &s->temps[k];
-        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
+        if (ts->val_type == TEMP_VAL_REG
+            && ts->kind != TEMP_FIXED
             && s->reg_to_temp[ts->reg] != ts) {
             printf("Inconsistency for temp %s:\n",
                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
@@ -3228,16 +3365,28 @@ static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
    mark it free; otherwise mark it dead.  */
 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
 {
-    if (ts->fixed_reg) {
+    TCGTempVal new_type;
+
+    switch (ts->kind) {
+    case TEMP_FIXED:
         return;
+    case TEMP_GLOBAL:
+    case TEMP_LOCAL:
+        new_type = TEMP_VAL_MEM;
+        break;
+    case TEMP_NORMAL:
+        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
+        break;
+    case TEMP_CONST:
+        new_type = TEMP_VAL_CONST;
+        break;
+    default:
+        g_assert_not_reached();
     }
     if (ts->val_type == TEMP_VAL_REG) {
         s->reg_to_temp[ts->reg] = NULL;
     }
-    ts->val_type = (free_or_dead < 0
-                    || ts->temp_local
-                    || ts->temp_global
-                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
+    ts->val_type = new_type;
 }

 /* Mark a temporary as dead.  */
@@ -3253,10 +3402,7 @@ static inline void temp_dead(TCGContext *s, TCGTemp *ts)
 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                       TCGRegSet preferred_regs, int free_or_dead)
 {
-    if (ts->fixed_reg) {
-        return;
-    }
-    if (!ts->mem_coherent) {
+    if (!temp_readonly(ts) && !ts->mem_coherent) {
         if (!ts->mem_allocated) {
             temp_allocate_frame(s, ts);
         }
@@ -3387,7 +3533,27 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
     case TEMP_VAL_CONST:
         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                             preferred_regs, ts->indirect_base);
-        tcg_out_movi(s, ts->type, reg, ts->val);
+        if (ts->type <= TCG_TYPE_I64) {
+            tcg_out_movi(s, ts->type, reg, ts->val);
+        } else {
+            uint64_t val = ts->val;
+            MemOp vece = MO_64;
+
+            /*
+             * Find the minimal vector element that matches the constant.
+             * The targets will, in general, have to do this search anyway,
+             * do this generically.
+             */
+            if (val == dup_const(MO_8, val)) {
+                vece = MO_8;
+            } else if (val == dup_const(MO_16, val)) {
+                vece = MO_16;
+            } else if (val == dup_const(MO_32, val)) {
+                vece = MO_32;
+            }
+
+            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
+        }
         ts->mem_coherent = 0;
         break;
     case TEMP_VAL_MEM:
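
The search above relies on dup_const(vece, val) replicating the low 2^vece bytes of val across all 64 bits, which QEMU computes with multiplications by patterns like 0x0101010101010101. A self-contained sketch of the search, with dup_const and min_vece re-implemented here for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef enum { MO_8, MO_16, MO_32, MO_64 } MemOp;

    /* Replicate the low 2^vece bytes of val across 64 bits. */
    static uint64_t dup_const(MemOp vece, uint64_t val)
    {
        switch (vece) {
        case MO_8:  return 0x0101010101010101ull * (uint8_t)val;
        case MO_16: return 0x0001000100010001ull * (uint16_t)val;
        case MO_32: return 0x0000000100000001ull * (uint32_t)val;
        default:    return val;
        }
    }

    /* The minimal element size whose replication reproduces val. */
    static MemOp min_vece(uint64_t val)
    {
        if (val == dup_const(MO_8, val))  return MO_8;
        if (val == dup_const(MO_16, val)) return MO_16;
        if (val == dup_const(MO_32, val)) return MO_32;
        return MO_64;
    }

    int main(void)
    {
        printf("%d\n", (int)min_vece(0xababababababababull)); /* 0: MO_8 */
        printf("%d\n", (int)min_vece(0x00ff00ff00ff00ffull)); /* 1: MO_16 */
        printf("%d\n", (int)min_vece(0x123456789abcdef0ull)); /* 3: MO_64 */
        return 0;
    }

Doing the narrowing generically means each backend's tcg_out_dupi_vec only has to handle the element size it is handed, rather than repeating this search.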
@@ -3411,7 +3577,7 @@ static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
 {
     /* The liveness analysis already ensures that globals are back
        in memory. Keep an tcg_debug_assert for safety. */
-    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
+    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
 }

 /* save globals to their canonical location and assume they can be
@@ -3436,7 +3602,7 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
     for (i = 0, n = s->nb_globals; i < n; i++) {
         TCGTemp *ts = &s->temps[i];
         tcg_debug_assert(ts->val_type != TEMP_VAL_REG
-                         || ts->fixed_reg
+                         || ts->kind == TEMP_FIXED
                          || ts->mem_coherent);
     }
 }
@@ -3449,12 +3615,22 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
     for (i = s->nb_globals; i < s->nb_temps; i++) {
         TCGTemp *ts = &s->temps[i];
-        if (ts->temp_local) {
+
+        switch (ts->kind) {
+        case TEMP_LOCAL:
             temp_save(s, ts, allocated_regs);
-        } else {
+            break;
+        case TEMP_NORMAL:
             /* The liveness analysis already ensures that temps are dead.
                Keep an tcg_debug_assert for safety. */
             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
+            break;
+        case TEMP_CONST:
+            /* Similarly, we should have freed any allocated register. */
+            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
+            break;
+        default:
+            g_assert_not_reached();
         }
     }
@@ -3475,23 +3651,30 @@ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
          * The liveness analysis already ensures that temps are dead.
          * Keep tcg_debug_asserts for safety.
          */
-        if (ts->temp_local) {
+        switch (ts->kind) {
+        case TEMP_LOCAL:
             tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
-        } else {
+            break;
+        case TEMP_NORMAL:
             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
+            break;
+        case TEMP_CONST:
+            break;
+        default:
+            g_assert_not_reached();
         }
     }
 }

 /*
- * Specialized code generation for INDEX_op_movi_*.
+ * Specialized code generation for INDEX_op_mov_* with a constant.
  */
 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                   tcg_target_ulong val, TCGLifeData arg_life,
                                   TCGRegSet preferred_regs)
 {
     /* ENV should not be modified. */
-    tcg_debug_assert(!ots->fixed_reg);
+    tcg_debug_assert(!temp_readonly(ots));

     /* The movi is not explicitly generated here. */
     if (ots->val_type == TEMP_VAL_REG) {
@@ -3507,14 +3690,6 @@ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
     }
 }

-static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
-{
-    TCGTemp *ots = arg_temp(op->args[0]);
-    tcg_target_ulong val = op->args[1];
-
-    tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
-}
-
 /*
  * Specialized code generation for INDEX_op_mov_*.
  */
@@ -3531,7 +3706,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
     ts = arg_temp(op->args[1]);

     /* ENV should not be modified. */
-    tcg_debug_assert(!ots->fixed_reg);
+    tcg_debug_assert(!temp_readonly(ots));

     /* Note that otype != itype for no-op truncation. */
     otype = ots->type;
@@ -3570,7 +3745,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
         }
         temp_dead(s, ots);
     } else {
-        if (IS_DEAD_ARG(1) && !ts->fixed_reg) {
+        if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
             /* the mov can be suppressed */
             if (ots->val_type == TEMP_VAL_REG) {
                 s->reg_to_temp[ots->reg] = NULL;
@@ -3592,7 +3767,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
              * Store the source register into the destination slot
              * and leave the destination temp as TEMP_VAL_MEM.
              */
-            assert(!ots->fixed_reg);
+            assert(!temp_readonly(ots));
             if (!ts->mem_allocated) {
                 temp_allocate_frame(s, ots);
             }
@@ -3629,7 +3804,7 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
     its = arg_temp(op->args[1]);

     /* ENV should not be modified. */
-    tcg_debug_assert(!ots->fixed_reg);
+    tcg_debug_assert(!temp_readonly(ots));

     itype = its->type;
     vece = TCGOP_VECE(op);
@@ -3769,45 +3944,42 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
             i_preferred_regs = o_preferred_regs = 0;
             if (arg_ct->ialias) {
                 o_preferred_regs = op->output_pref[arg_ct->alias_index];
-                if (ts->fixed_reg) {
-                    /* if fixed register, we must allocate a new register
-                       if the alias is not the same register */
-                    if (arg != op->args[arg_ct->alias_index]) {
-                        goto allocate_in_reg;
-                    }
-                } else {
-                    /* if the input is aliased to an output and if it is
-                       not dead after the instruction, we must allocate
-                       a new register and move it */
-                    if (!IS_DEAD_ARG(i)) {
-                        goto allocate_in_reg;
-                    }
-
-                    /* check if the current register has already been allocated
-                       for another input aliased to an output */
-                    if (ts->val_type == TEMP_VAL_REG) {
-                        int k2, i2;
-                        reg = ts->reg;
-                        for (k2 = 0 ; k2 < k ; k2++) {
-                            i2 = def->args_ct[nb_oargs + k2].sort_index;
-                            if (def->args_ct[i2].ialias && reg == new_args[i2]) {
-                                goto allocate_in_reg;
-                            }
-                        }
-                    }
-                    i_preferred_regs = o_preferred_regs;
-                }
+
+                /*
+                 * If the input is readonly, then it cannot also be an
+                 * output and aliased to itself.  If the input is not
+                 * dead after the instruction, we must allocate a new
+                 * register and move it.
+                 */
+                if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
+                    goto allocate_in_reg;
+                }
+
+                /*
+                 * Check if the current register has already been allocated
+                 * for another input aliased to an output.
+                 */
+                if (ts->val_type == TEMP_VAL_REG) {
+                    reg = ts->reg;
+                    for (int k2 = 0; k2 < k; k2++) {
+                        int i2 = def->args_ct[nb_oargs + k2].sort_index;
+                        if (def->args_ct[i2].ialias && reg == new_args[i2]) {
+                            goto allocate_in_reg;
+                        }
+                    }
+                }
+                i_preferred_regs = o_preferred_regs;
             }
             temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs);
             reg = ts->reg;
-            if (tcg_regset_test_reg(arg_ct->regs, reg)) {
-                /* nothing to do : the constraint is satisfied */
-            } else {
-            allocate_in_reg:
-                /* allocate a new register matching the constraint
-                   and move the temporary register into it */
+            if (!tcg_regset_test_reg(arg_ct->regs, reg)) {
+ allocate_in_reg:
+                /*
+                 * Allocate a new register matching the constraint
+                 * and move the temporary register into it.
+                 */
                 temp_load(s, ts, tcg_target_available_regs[ts->type],
                           i_allocated_regs, 0);
                 reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs,
@@ -3861,7 +4033,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         ts = arg_temp(arg);

         /* ENV should not be modified. */
-        tcg_debug_assert(!ts->fixed_reg);
+        tcg_debug_assert(!temp_readonly(ts));

         if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
             reg = new_args[arg_ct->alias_index];
@@ -3902,7 +4074,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         ts = arg_temp(op->args[i]);

         /* ENV should not be modified. */
-        tcg_debug_assert(!ts->fixed_reg);
+        tcg_debug_assert(!temp_readonly(ts));

         if (NEED_SYNC_ARG(i)) {
             temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
@@ -3912,6 +4084,98 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     }
 }
+static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
+{
+    const TCGLifeData arg_life = op->life;
+    TCGTemp *ots, *itsl, *itsh;
+    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
+
+    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+    tcg_debug_assert(TCGOP_VECE(op) == MO_64);
+
+    ots = arg_temp(op->args[0]);
+    itsl = arg_temp(op->args[1]);
+    itsh = arg_temp(op->args[2]);
+
+    /* ENV should not be modified. */
+    tcg_debug_assert(!temp_readonly(ots));
+
+    /* Allocate the output register now. */
+    if (ots->val_type != TEMP_VAL_REG) {
+        TCGRegSet allocated_regs = s->reserved_regs;
+        TCGRegSet dup_out_regs =
+            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
+
+        /* Make sure to not spill the input registers. */
+        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
+            tcg_regset_set_reg(allocated_regs, itsl->reg);
+        }
+        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
+            tcg_regset_set_reg(allocated_regs, itsh->reg);
+        }
+
+        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
+                                 op->output_pref[0], ots->indirect_base);
+        ots->val_type = TEMP_VAL_REG;
+        ots->mem_coherent = 0;
+        s->reg_to_temp[ots->reg] = ots;
+    }
+
+    /* Promote dup2 of immediates to dupi_vec. */
+    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
+        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
+        MemOp vece = MO_64;
+
+        if (val == dup_const(MO_8, val)) {
+            vece = MO_8;
+        } else if (val == dup_const(MO_16, val)) {
+            vece = MO_16;
+        } else if (val == dup_const(MO_32, val)) {
+            vece = MO_32;
+        }
+
+        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
+        goto done;
+    }
+
+    /* If the two inputs form one 64-bit value, try dupm_vec. */
+    if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
+        if (!itsl->mem_coherent) {
+            temp_sync(s, itsl, s->reserved_regs, 0, 0);
+        }
+        if (!itsh->mem_coherent) {
+            temp_sync(s, itsh, s->reserved_regs, 0, 0);
+        }
+#ifdef HOST_WORDS_BIGENDIAN
+        TCGTemp *its = itsh;
+#else
+        TCGTemp *its = itsl;
+#endif
+        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
+                             its->mem_base->reg, its->mem_offset)) {
+            goto done;
+        }
+    }
+
+    /* Fall back to generic expansion. */
+    return false;
+
+ done:
+    if (IS_DEAD_ARG(1)) {
+        temp_dead(s, itsl);
+    }
+    if (IS_DEAD_ARG(2)) {
+        temp_dead(s, itsh);
+    }
+    if (NEED_SYNC_ARG(0)) {
+        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
+    } else if (IS_DEAD_ARG(0)) {
+        temp_dead(s, ots);
+    }
+    return true;
+}
+
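The immediate-promotion path pastes the two constant 32-bit halves into a single 64-bit value with deposit64(itsl->val, 32, 32, itsh->val) and then reuses the same minimal-vece search as temp_load. A sketch of that pasting step, with deposit64 re-implemented here to match the semantics of QEMU's bitops.h (valid for 0 < length < 64):

    #include <stdint.h>
    #include <stdio.h>

    /* Replace 'length' bits of 'value' starting at 'start' with the
       low bits of 'fieldval'. */
    static uint64_t deposit64(uint64_t value, int start, int length,
                              uint64_t fieldval)
    {
        uint64_t mask = (~0ull >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        uint32_t lo = 0x9abcdef0, hi = 0x12345678;
        uint64_t val = deposit64(lo, 32, 32, hi);
        printf("0x%016llx\n", (unsigned long long)val);
        /* 0x123456789abcdef0: hi in bits 63..32, lo in bits 31..0 */
        return 0;
    }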
 #ifdef TCG_TARGET_STACK_GROWSUP
 #define STACK_DIR(x) (-(x))
 #else
@@ -4034,7 +4298,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
         ts = arg_temp(arg);

         /* ENV should not be modified. */
-        tcg_debug_assert(!ts->fixed_reg);
+        tcg_debug_assert(!temp_readonly(ts));

         reg = tcg_target_call_oarg_regs[i];
         tcg_debug_assert(s->reg_to_temp[reg] == NULL);
@@ -4298,11 +4562,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
         case INDEX_op_mov_vec:
             tcg_reg_alloc_mov(s, op);
             break;
-        case INDEX_op_movi_i32:
-        case INDEX_op_movi_i64:
-        case INDEX_op_dupi_vec:
-            tcg_reg_alloc_movi(s, op);
-            break;
         case INDEX_op_dup_vec:
             tcg_reg_alloc_dup(s, op);
             break;
@@ -4334,6 +4593,11 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
         case INDEX_op_call:
             tcg_reg_alloc_call(s, op);
             break;
+        case INDEX_op_dup2_vec:
+            if (tcg_reg_alloc_dup2(s, op)) {
+                break;
+            }
+            /* fall through */
         default:
             /* Sanity check that we've not introduced any unhandled opcodes. */
             tcg_debug_assert(tcg_op_supported(opc));


@@ -593,7 +593,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t1 = tci_read_r32(regs, &tb_ptr);
             tci_write_reg32(regs, t0, t1);
             break;
-        case INDEX_op_movi_i32:
+        case INDEX_op_tci_movi_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_i32(&tb_ptr);
             tci_write_reg32(regs, t0, t1);
@@ -864,7 +864,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t1 = tci_read_r64(regs, &tb_ptr);
             tci_write_reg64(regs, t0, t1);
             break;
-        case INDEX_op_movi_i64:
+        case INDEX_op_tci_movi_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_i64(&tb_ptr);
             tci_write_reg64(regs, t0, t1);
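
The interpreter decodes a tci_movi as an opcode byte, a destination-register byte, and then the raw immediate, read via tci_read_i32/tci_read_i64. A toy interpreter loop in the same spirit; the opcode numbering and stream layout here are illustrative, not the exact TCI wire format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { OP_END = 0, OP_TCI_MOVI_I32 = 1, OP_TCI_MOVI_I64 = 2 };

    static uint64_t regs[16];

    static void run(const uint8_t *pc)
    {
        for (;;) {
            uint8_t op = *pc++;         /* like 'opc = *tb_ptr++' */
            switch (op) {
            case OP_TCI_MOVI_I64: {
                uint8_t r = *pc++;      /* destination register byte */
                uint64_t imm;
                memcpy(&imm, pc, 8);    /* like tci_read_i64() */
                pc += 8;
                regs[r] = imm;
                break;
            }
            case OP_END:
            default:
                return;
            }
        }
    }

    int main(void)
    {
        uint8_t code[16] = { OP_TCI_MOVI_I64, 3 };
        uint64_t imm = 0x123456789abcdef0ull;
        memcpy(code + 2, &imm, 8);
        code[10] = OP_END;
        run(code);
        printf("r3 = 0x%llx\n", (unsigned long long)regs[3]);
        return 0;
    }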


@@ -529,13 +529,13 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     uint8_t *old_code_ptr = s->code_ptr;
     uint32_t arg32 = arg;
     if (type == TCG_TYPE_I32 || arg == arg32) {
-        tcg_out_op_t(s, INDEX_op_movi_i32);
+        tcg_out_op_t(s, INDEX_op_tci_movi_i32);
         tcg_out_r(s, t0);
         tcg_out32(s, arg32);
     } else {
         tcg_debug_assert(type == TCG_TYPE_I64);
 #if TCG_TARGET_REG_BITS == 64
-        tcg_out_op_t(s, INDEX_op_movi_i64);
+        tcg_out_op_t(s, INDEX_op_tci_movi_i64);
         tcg_out_r(s, t0);
         tcg_out64(s, arg);
 #else
@@ -814,8 +814,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         break;
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();
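
The emitter above keeps the bytecode compact by choosing the i32 form whenever the value is representable in 32 bits, falling back to the i64 form otherwise. That selection in isolation, as a sketch; the opcode values are placeholders:

    #include <stdint.h>
    #include <stdio.h>

    enum { OP_TCI_MOVI_I32 = 1, OP_TCI_MOVI_I64 = 2 };

    /* Mirror of 'type == TCG_TYPE_I32 || arg == arg32' above. */
    static int pick_movi_op(int is_i32_type, uint64_t arg)
    {
        uint32_t arg32 = (uint32_t)arg;
        return (is_i32_type || arg == arg32) ? OP_TCI_MOVI_I32
                                             : OP_TCI_MOVI_I64;
    }

    int main(void)
    {
        printf("%d\n", pick_movi_op(0, 42));          /* 1: fits in i32 */
        printf("%d\n", pick_movi_op(0, 1ull << 40));  /* 2: needs i64 */
        return 0;
    }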