Merge tag 'pull-tcg-20230301' of https://gitlab.com/rth7680/qemu into staging

helper-head: Add fpu/softfloat-types.h
softmmu: Use memmove in flatview_write_continue
tcg: Add sign param to probe_access_flags, probe_access_full
tcg: Convert TARGET_TB_PCREL to CF_PCREL
tcg: Simplify temporary lifetimes for translators

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmP/jWUdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9TcQf7B7+K/lrWvUVhZ4By
# 7zrNIJKGwsxuQhGq9mS2Nx9ds9es5mS8SQT1ieNG6a51n6Gq8S2B8yFCRFdlDZWD
# /QrMSjxrs+4c6pNHZu4v20Huy/VW0y004eYdGc8Lu5cXTDpy1mUZ2PrZYlWNQEVY
# 4Ts5rTWdSZHRU1+dbB8MTWlml9//++TPB+ZvzqSb8jnRJfw4z7ijVJjUEEb93gQg
# 8S3JiPU6d1ZzoXzGMK7Wd0MMi4pQUZkaX1HOpzvmQXjeErSP87CZvvji/Cucm8iW
# rJ4U7t99nmZDqG9W1zdZfYfKNp4nLlfVldQWFVIx45awSPS0mCzrmeBT5NHyrxYK
# 4OtuNQ==
# =vzqE
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 01 Mar 2023 17:37:41 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20230301' of https://gitlab.com/rth7680/qemu: (62 commits)
  tcg: Update docs/devel/tcg-ops.rst for temporary changes
  tcg: Remove tcg_temp_local_new_*, tcg_const_local_*
  exec/gen-icount: Don't use tcg_temp_local_new_i32
  target/xtensa: Don't use tcg_temp_local_new_*
  target/ppc: Don't use tcg_temp_local_new
  target/mips: Don't use tcg_temp_local_new
  target/i386: Don't use tcg_temp_local_new
  target/hppa: Don't use tcg_temp_local_new
  target/hexagon/idef-parser: Drop gen_tmp_local
  target/hexagon: Don't use tcg_temp_local_new_*
  target/cris: Don't use tcg_temp_local_new
  target/arm: Don't use tcg_temp_local_new_*
  target/arm: Drop copies in gen_sve_{ldr,str}
  tcg: Change default temp lifetime to TEMP_TB
  tcg: Don't re-use TEMP_TB temporaries
  accel/tcg/plugin: Tidy plugin_gen_disable_mem_helpers
  accel/tcg/plugin: Use tcg_temp_ebb_*
  tcg: Use tcg_constant_ptr in do_dup
  tcg: Use tcg_temp_ebb_new_* in tcg/
  tcg: Add tcg_temp_ebb_new_{i32,i64,ptr}
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a2b5f8b8ab
Author: Peter Maydell <peter.maydell@linaro.org>
Date:   2023-03-01 19:19:20 +00:00
84 files changed, 870 insertions(+), 890 deletions(-)


@ -25,7 +25,7 @@ void tcg_flush_jmp_cache(CPUState *cpu)
{
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{


@ -183,7 +183,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
@ -235,7 +235,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@ -254,21 +254,46 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
hash = tb_jmp_cache_hash_func(pc);
jc = cpu->tb_jmp_cache;
tb = tb_jmp_cache_get_tb(jc, hash);
if (likely(tb &&
tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) {
return tb;
if (cflags & CF_PCREL) {
/* Use acquire to ensure current load of pc from jc. */
tb = qatomic_load_acquire(&jc->array[hash].tb);
if (likely(tb &&
jc->array[hash].pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
jc->array[hash].pc = pc;
/* Use store_release on tb to ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
} else {
/* Use rcu_read to ensure current load of pc from *tb. */
tb = qatomic_rcu_read(&jc->array[hash].tb);
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
tb_cflags(tb) == cflags)) {
return tb;
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
}
tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
tb_jmp_cache_set(jc, hash, tb, pc);
return tb;
}
@ -457,9 +482,9 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
assert(!TARGET_TB_PCREL);
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
assert(cc->set_pc);
cc->set_pc(cpu, tb_pc(last_tb));
cc->set_pc(cpu, last_tb->pc);
}
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, last_tb);
@ -957,7 +982,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
/* Use the pc value already stored in tb->pc. */
qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
}
#ifndef CONFIG_USER_ONLY
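
The CF_PCREL path in tb_lookup above depends on ordering: jc->array[hash].pc
is only meaningful if it was written before the tb pointer was published.
A minimal standalone sketch of that publish/consume pairing, using C11
atomics in place of QEMU's qatomic wrappers (all names are illustrative):

    #include <stdatomic.h>

    struct tb;                      /* opaque translation block */

    struct jc_entry {
        _Atomic(struct tb *) tb;
        unsigned long pc;           /* ordered by release/acquire on 'tb' */
    };

    /* Publisher: write pc first, then release-store tb. */
    static void jc_publish(struct jc_entry *e, struct tb *tb, unsigned long pc)
    {
        e->pc = pc;
        atomic_store_explicit(&e->tb, tb, memory_order_release);
    }

    /* Consumer: acquire-load tb; if non-NULL, the matching pc is visible. */
    static struct tb *jc_lookup(struct jc_entry *e, unsigned long *pc)
    {
        struct tb *tb = atomic_load_explicit(&e->tb, memory_order_acquire);
        if (tb) {
            *pc = e->pc;
        }
        return tb;
    }

The release pairs with the acquire, so a reader that observes the new tb
cannot observe a stale pc; the non-PCREL path needs none of this because
the pc lives inside the TB itself.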


@ -1589,12 +1589,12 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
return flags;
}
int probe_access_full(CPUArchState *env, target_ulong addr,
int probe_access_full(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
{
int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
nonfault, phost, pfull, retaddr);
/* Handle clean RAM pages. */
@ -1606,14 +1606,25 @@ int probe_access_full(CPUArchState *env, target_ulong addr,
return flags;
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
CPUTLBEntryFull *full;
int flags;
return probe_access_full(env, addr, access_type, mmu_idx,
nonfault, phost, &full, retaddr);
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
nonfault, phost, &full, retaddr);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
notdirty_write(env_cpu(env), addr, 1, full, retaddr);
flags &= ~TLB_NOTDIRTY;
}
return flags;
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
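
Both probe functions now take the access size and assert that the range
stays on one page: -(addr | TARGET_PAGE_MASK) is the number of bytes left
in addr's page. A hedged sketch of a caller under the new signature (the
wrapper itself is hypothetical):

    /* Probe a 'size'-byte read without faulting; NULL if unmapped.
     * The range addr..addr+size-1 must not cross a guest page. */
    static void *probe_read_nofault(CPUArchState *env, target_ulong addr,
                                    int size, int mmu_idx, uintptr_t ra)
    {
        void *host;
        int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
                                       mmu_idx, true /* nonfault */,
                                       &host, ra);
        return (flags & TLB_INVALID_MASK) ? NULL : host;
    }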


@ -57,11 +57,11 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return cpu->cc->get_pc(cpu);
#else
return tb_pc(tb);
#endif
if (tb_cflags(tb) & CF_PCREL) {
return cpu->cc->get_pc(cpu);
} else {
return tb->pc;
}
}
extern int64_t max_delay;


@ -328,7 +328,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
for (insn = 0; insn < tb->icount; insn++) {
/* FIXME: This replicates the restore_state_to_opc() logic. */
q[insn].address = tcg_ctx->gen_insn_data[insn][0];
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
} else {
#if defined(TARGET_I386)


@ -93,11 +93,13 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_i32 meminfo = tcg_const_i32(info);
TCGv_i64 vaddr64 = tcg_temp_new_i64();
TCGv_ptr udata = tcg_const_ptr(NULL);
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
TCGv_i64 vaddr64 = tcg_temp_ebb_new_i64();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_i32(meminfo, info);
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
tcg_gen_extu_tl_i64(vaddr64, vaddr);
@ -112,9 +114,10 @@ static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
static void gen_empty_udata_cb(void)
{
TCGv_i32 cpu_index = tcg_temp_new_i32();
TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, cpu_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
@ -129,9 +132,10 @@ static void gen_empty_udata_cb(void)
*/
static void gen_empty_inline_cb(void)
{
TCGv_i64 val = tcg_temp_new_i64();
TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */
TCGv_i64 val = tcg_temp_ebb_new_i64();
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_ld_i64(val, ptr, 0);
/* pass an immediate != 0 so that it doesn't get optimized away */
tcg_gen_addi_i64(val, val, 0xdeadface);
@ -151,9 +155,9 @@ static void gen_empty_mem_cb(TCGv addr, uint32_t info)
*/
static void gen_empty_mem_helper(void)
{
TCGv_ptr ptr;
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
ptr = tcg_const_ptr(NULL);
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
@ -626,8 +630,6 @@ static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
TCGv_ptr ptr;
/*
* We could emit the clearing unconditionally and be done. However, this can
* be wasteful if for instance plugins don't track memory accesses, or if
@ -640,10 +642,8 @@ void plugin_gen_disable_mem_helpers(void)
if (!tcg_ctx->plugin_tb->mem_helper) {
return;
}
ptr = tcg_const_ptr(NULL);
tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
}
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,


@ -14,53 +14,15 @@
/*
* Accessed in parallel; all accesses to 'tb' must be atomic.
* For TARGET_TB_PCREL, accesses to 'pc' must be protected by
* a load_acquire/store_release to 'tb'.
* For CF_PCREL, accesses to 'pc' must be protected by a
* load_acquire/store_release to 'tb'.
*/
struct CPUJumpCache {
struct rcu_head rcu;
struct {
TranslationBlock *tb;
#if TARGET_TB_PCREL
target_ulong pc;
#endif
} array[TB_JMP_CACHE_SIZE];
};
static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
/* Use acquire to ensure current load of pc from jc. */
return qatomic_load_acquire(&jc->array[hash].tb);
#else
/* Use rcu_read to ensure current load of pc from *tb. */
return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}
static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
return jc->array[hash].pc;
#else
return tb_pc(tb);
#endif
}
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
jc->array[hash].pc = pc;
/* Use store_release on tb to ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
#else
/* Use the pc value already stored in tb->pc. */
qatomic_set(&jc->array[hash].tb, tb);
#endif
}
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */


@ -44,7 +44,7 @@ static bool tb_cmp(const void *ap, const void *bp)
const TranslationBlock *a = ap;
const TranslationBlock *b = bp;
return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
return ((tb_cflags(a) & CF_PCREL || a->pc == b->pc) &&
a->cs_base == b->cs_base &&
a->flags == b->flags &&
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
@ -847,13 +847,13 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
CPUState *cpu;
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
/* A TB may be at any virtual address */
CPU_FOREACH(cpu) {
tcg_flush_jmp_cache(cpu);
}
} else {
uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
uint32_t h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
CPUJumpCache *jc = cpu->tb_jmp_cache;
@ -885,7 +885,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb_page_addr0(tb);
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, orig_cflags, tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
@ -966,7 +966,7 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_record(tb, p, p2);
/* add in the hash table */
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
tb->flags, tb->cflags, tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
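
Hash consistency is the point of these hunks: lookup, removal and insertion
must key the TB identically, and under CF_PCREL that means hashing a zero
virtual pc so the block is reachable from any mapping of the physical page.
The key rule, distilled into a sketch:

    /* The pc component of the TB hash key under the two modes. */
    static target_ulong tb_hash_pc(uint32_t cflags, target_ulong pc)
    {
        return (cflags & CF_PCREL) ? 0 : pc;
    }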


@ -134,7 +134,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
} else {
prev = tcg_ctx->gen_insn_data[i - 1][j];
}
@ -169,8 +169,8 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
}
memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
if (!TARGET_TB_PCREL) {
data[0] = tb_pc(tb);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
}
/*
@ -280,7 +280,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_func_start(tcg_ctx);
tcg_ctx->cpu = env_cpu(env);
gen_intermediate_code(env_cpu(env), tb, *max_insns, pc, host_pc);
gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
@ -340,9 +340,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
#if !TARGET_TB_PCREL
tb->pc = pc;
#endif
if (!(cflags & CF_PCREL)) {
tb->pc = pc;
}
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
@ -407,8 +407,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->tc.size = gen_code_size;
/*
* For TARGET_TB_PCREL, attribute all executions of the generated
* code to its first mapping.
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));


@ -42,7 +42,7 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db)
{
@ -55,7 +55,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
db->pc_next = pc;
db->is_jmp = DISAS_NEXT;
db->num_insns = 0;
db->max_insns = max_insns;
db->max_insns = *max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
@ -78,7 +78,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
while (true) {
db->num_insns++;
*max_insns = ++db->num_insns;
ops->insn_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
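
Because translator_loop now takes int *max_insns and stores back the count
(*max_insns = ++db->num_insns; above), each target's gen_intermediate_code
gains the matching pointer parameter. A sketch of the converted entry
point, following the alpha/avr/cris updates later in this series
(my_tr_ops stands in for the target's TranslatorOps):

    void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb,
                               int *max_insns, target_ulong pc, void *host_pc)
    {
        DisasContext dc;

        /* translator_loop updates *max_insns as instructions are emitted. */
        translator_loop(cpu, tb, max_insns, pc, host_pc, &my_tr_ops, &dc.base);
    }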


@ -761,13 +761,14 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, target_ulong addr,
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
int flags;
flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
*phost = flags ? NULL : g2h(env_cpu(env), addr);
return flags;
}


@ -7,67 +7,51 @@ TCG Intermediate Representation
Introduction
============
TCG (Tiny Code Generator) began as a generic backend for a C
compiler. It was simplified to be used in QEMU. It also has its roots
in the QOP code generator written by Paul Brook.
TCG (Tiny Code Generator) began as a generic backend for a C compiler.
It was simplified to be used in QEMU. It also has its roots in the
QOP code generator written by Paul Brook.
Definitions
===========
TCG receives RISC-like *TCG ops* and performs some optimizations on them,
including liveness analysis and trivial constant expression
evaluation. TCG ops are then implemented in the host CPU back end,
also known as the TCG target.
The TCG *target* is the architecture for which we generate the
code. It is of course not the same as the "target" of QEMU which is
the emulated architecture. As TCG started as a generic C backend used
for cross compiling, it is assumed that the TCG target is different
from the host, although it is never the case for QEMU.
The TCG *target* is the architecture for which we generate the code.
It is of course not the same as the "target" of QEMU which is the
emulated architecture. As TCG started as a generic C backend used
for cross compiling, the assumption was that TCG target might be
different from the host, although this is never the case for QEMU.
In this document, we use *guest* to specify what architecture we are
emulating; *target* always means the TCG target, the machine on which
we are running QEMU.
A TCG *function* corresponds to a QEMU Translated Block (TB).
A TCG *temporary* is a variable only live in a basic block. Temporaries are allocated explicitly in each function.
A TCG *local temporary* is a variable only live in a function. Local temporaries are allocated explicitly in each function.
A TCG *global* is a variable which is live in all the functions
(equivalent of a C global variable). They are defined before the
functions defined. A TCG global can be a memory location (e.g. a QEMU
CPU register), a fixed host register (e.g. the QEMU CPU state pointer)
or a memory location which is stored in a register outside QEMU TBs
(not implemented yet).
A TCG *basic block* corresponds to a list of instructions terminated
by a branch instruction.
An operation with *undefined behavior* may result in a crash.
An operation with *unspecified behavior* shall not crash. However,
the result may be one of several possibilities so may be considered
an *undefined result*.
Intermediate representation
===========================
Basic Blocks
============
Introduction
------------
A TCG *basic block* is a single entry, multiple exit region which
corresponds to a list of instructions terminated by a label, or
any branch instruction.
TCG instructions operate on variables which are temporaries, local
temporaries or globals. TCG instructions and variables are strongly
typed. Two types are supported: 32 bit integers and 64 bit
integers. Pointers are defined as an alias to 32 bit or 64 bit
integers depending on the TCG target word size.
A TCG *extended basic block* is a single entry, multiple exit region
which corresponds to a list of instructions terminated by a label or
an unconditional branch. Specifically, an extended basic block is
a sequence of basic blocks connected by the fall-through paths of
zero or more conditional branch instructions.
Each instruction has a fixed number of output variable operands, input
variable operands and always constant operands.
Operations
==========
The notable exception is the call instruction which has a variable
number of outputs and inputs.
TCG instructions or *ops* operate on TCG *variables*, both of which
are strongly typed. Each instruction has a fixed number of output
variable operands, input variable operands and constant operands.
Vector instructions have a field specifying the element size within
the vector. The notable exception is the call instruction which has
a variable number of outputs and inputs.
In the textual form, output operands usually come first, followed by
input operands, followed by constant operands. The output type is
@ -77,68 +61,127 @@ included in the instruction name. Constants are prefixed with a '$'.
add_i32 t0, t1, t2 /* (t0 <- t1 + t2) */
Variables
=========
Assumptions
-----------
* ``TEMP_FIXED``
Basic blocks
^^^^^^^^^^^^
There is one TCG *fixed global* variable, ``cpu_env``, which is
live in all translation blocks, and holds a pointer to ``CPUArchState``.
This variable is held in a host cpu register at all times in all
translation blocks.
* Basic blocks end after branches (e.g. brcond_i32 instruction),
goto_tb and exit_tb instructions.
* ``TEMP_GLOBAL``
* Basic blocks start after the end of a previous basic block, or at a
set_label instruction.
A TCG *global* is a variable which is live in all translation blocks,
and corresponds to memory location that is within ``CPUArchState``.
These may be specified as an offset from ``cpu_env``, in which case
they are called *direct globals*, or may be specified as an offset
from a direct global, in which case they are called *indirect globals*.
Even indirect globals should still reference memory within
``CPUArchState``. All TCG globals are defined during
``TCGCPUOps.initialize``, before any translation blocks are generated.
After the end of a basic block, the content of temporaries is
destroyed, but local temporaries and globals are preserved.
* ``TEMP_CONST``
Floating point types
^^^^^^^^^^^^^^^^^^^^
A TCG *constant* is a variable which is live throughout the entire
translation block, and contains a constant value. These variables
are allocated on demand during translation and are hashed so that
there is exactly one variable holding a given value.
* Floating point types are not supported yet
* ``TEMP_TB``
Pointers
^^^^^^^^
A TCG *translation block temporary* is a variable which is live
throughout the entire translation block, but dies on any exit.
These temporaries are allocated explicitly during translation.
* Depending on the TCG target, pointer size is 32 bit or 64
bit. The type ``TCG_TYPE_PTR`` is an alias to ``TCG_TYPE_I32`` or
``TCG_TYPE_I64``.
* ``TEMP_EBB``
A TCG *extended basic block temporary* is a variable which is live
throughout an extended basic block, but dies on any exit.
These temporaries are allocated explicitly during translation.
Types
=====
* ``TCG_TYPE_I32``
A 32-bit integer.
* ``TCG_TYPE_I64``
A 64-bit integer. For 32-bit hosts, such variables are split into a pair
of variables with ``type=TCG_TYPE_I32`` and ``base_type=TCG_TYPE_I64``.
The ``temp_subindex`` for each indicates where it falls within the
host-endian representation.
* ``TCG_TYPE_PTR``
An alias for ``TCG_TYPE_I32`` or ``TCG_TYPE_I64``, depending on the size
of a pointer for the host.
* ``TCG_TYPE_REG``
An alias for ``TCG_TYPE_I32`` or ``TCG_TYPE_I64``, depending on the size
of the integer registers for the host. This may be larger
than ``TCG_TYPE_PTR`` depending on the host ABI.
* ``TCG_TYPE_I128``
A 128-bit integer. For all hosts, such variables are split into a number
of variables with ``type=TCG_TYPE_REG`` and ``base_type=TCG_TYPE_I128``.
The ``temp_subindex`` for each indicates where it falls within the
host-endian representation.
* ``TCG_TYPE_V64``
A 64-bit vector. This type is valid only if the TCG target
sets ``TCG_TARGET_HAS_v64``.
* ``TCG_TYPE_V128``
A 128-bit vector. This type is valid only if the TCG target
sets ``TCG_TARGET_HAS_v128``.
* ``TCG_TYPE_V256``
A 256-bit vector. This type is valid only if the TCG target
sets ``TCG_TARGET_HAS_v256``.
Helpers
^^^^^^^
=======
* Using the tcg_gen_helper_x_y it is possible to call any function
taking i32, i64 or pointer types. By default, before calling a helper,
all globals are stored at their canonical location and it is assumed
that the function can modify them. By default, the helper is allowed to
modify the CPU state or raise an exception.
Helpers are registered in a guest-specific ``helper.h``,
which is processed to generate ``tcg_gen_helper_*`` functions.
With these functions it is possible to call a function taking
i32, i64, i128 or pointer types.
This can be overridden using the following function modifiers:
By default, before calling a helper, all globals are stored at their
canonical location. By default, the helper is allowed to modify the
CPU state (including the state represented by tcg globals)
or may raise an exception. This default can be overridden using the
following function modifiers:
- ``TCG_CALL_NO_READ_GLOBALS`` means that the helper does not read globals,
either directly or via an exception. They will not be saved to their
canonical locations before calling the helper.
* ``TCG_CALL_NO_WRITE_GLOBALS``
- ``TCG_CALL_NO_WRITE_GLOBALS`` means that the helper does not modify any globals.
They will only be saved to their canonical location before calling helpers,
but they won't be reloaded afterwards.
The helper does not modify any globals, but may read them.
Globals will be saved to their canonical location before calling helpers,
but need not be reloaded afterwards.
- ``TCG_CALL_NO_SIDE_EFFECTS`` means that the call to the function is removed if
the return value is not used.
* ``TCG_CALL_NO_READ_GLOBALS``
Note that ``TCG_CALL_NO_READ_GLOBALS`` implies ``TCG_CALL_NO_WRITE_GLOBALS``.
The helper does not read globals, either directly or via an exception.
They will not be saved to their canonical locations before calling
the helper. This implies ``TCG_CALL_NO_WRITE_GLOBALS``.
On some TCG targets (e.g. x86), several calling conventions are
supported.
* ``TCG_CALL_NO_SIDE_EFFECTS``
Branches
^^^^^^^^
* Use the instruction 'br' to jump to a label.
The call to the helper function may be removed if the return value is
not used. This means that it may not modify any CPU state nor may it
raise an exception.
Code Optimizations
------------------
==================
When generating instructions, you can count on at least the following
optimizations:
@ -908,20 +951,9 @@ Recommended coding rules for best performance
often modified, e.g. the integer registers and the condition
codes. TCG will be able to use host registers to store them.
- Avoid globals stored in fixed registers. They must be used only to
store the pointer to the CPU state and possibly to store a pointer
to a register window.
- Use temporaries. Use local temporaries only when really needed,
e.g. when you need to use a value after a jump. Local temporaries
introduce a performance hit in the current TCG implementation: their
content is saved to memory at end of each basic block.
- Free temporaries and local temporaries when they are no longer used
(tcg_temp_free). Since tcg_const_x() also creates a temporary, you
should free it after it is used. Freeing temporaries does not yield
a better generated code, but it reduces the memory usage of TCG and
the speed of the translation.
- Free temporaries when they are no longer used (``tcg_temp_free``).
Since ``tcg_const_x`` also creates a temporary, you should free it
after it is used.
- Don't hesitate to use helpers for complicated or seldom used guest
instructions. There is little performance advantage in using TCG to
@ -932,10 +964,6 @@ Recommended coding rules for best performance
the instruction is mostly doing loads and stores, and in those cases
inline TCG may still be faster for longer sequences.
- The hard limit on the number of TCG instructions you can generate
per guest instruction is set by ``MAX_OP_PER_INSTR`` in ``exec-all.h`` --
you cannot exceed this without risking a buffer overrun.
- Use the 'discard' instruction if you know that TCG won't be able to
prove that a given global is "dead" at a given program point. The
x86 guest uses it to improve the condition codes optimisation.
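
The practical consequence of the rewritten lifetime rules: a plain
tcg_temp_new_* temporary is now TEMP_TB and survives branches within the
translation block, which is exactly what previously required
tcg_temp_local_new_*. An illustrative front-end fragment (the helper name
is mine):

    /* Clamp src to 100; 'tmp' stays live across the conditional branch. */
    static void gen_clamp100(TCGv_i32 dst, TCGv_i32 src)
    {
        TCGLabel *done = gen_new_label();
        TCGv_i32 tmp = tcg_temp_new_i32();  /* TEMP_TB: no "local" needed */

        tcg_gen_mov_i32(tmp, src);
        tcg_gen_brcondi_i32(TCG_COND_LE, tmp, 100, done);
        tcg_gen_movi_i32(tmp, 100);
        gen_set_label(done);
        tcg_gen_mov_i32(dst, tmp);          /* still valid after the label */
        tcg_temp_free_i32(tmp);
    }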


@ -54,9 +54,6 @@
# error TARGET_PAGE_BITS must be defined in cpu-param.h
# endif
#endif
#ifndef TARGET_TB_PCREL
# define TARGET_TB_PCREL 0
#endif
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)


@ -447,6 +447,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
* probe_access_flags:
* @env: CPUArchState
* @addr: guest virtual address to look up
* @size: size of the access
* @access_type: read, write or execute permission
* @mmu_idx: MMU index to use for lookup
* @nonfault: suppress the fault
@ -461,7 +462,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
* Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags.
* For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
*/
int probe_access_flags(CPUArchState *env, target_ulong addr,
int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);
@ -474,7 +475,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
* and must be consumed or copied immediately, before any further
* access or changes to TLB @mmu_idx.
*/
int probe_access_full(CPUArchState *env, target_ulong addr,
int probe_access_full(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost,
CPUTLBEntryFull **pfull, uintptr_t retaddr);
@ -505,22 +506,20 @@ struct tb_tc {
};
struct TranslationBlock {
#if !TARGET_TB_PCREL
/*
* Guest PC corresponding to this block. This must be the true
* virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
* targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
* privilege, must store those bits elsewhere.
*
* If TARGET_TB_PCREL, the opcodes for the TranslationBlock are
* written such that the TB is associated only with the physical
* page and may be run in any virtual address context. In this case,
* PC must always be taken from ENV in a target-specific manner.
* If CF_PCREL, the opcodes for the TranslationBlock are written
* such that the TB is associated only with the physical page and
* may be run in any virtual address context. In this case, PC
* must always be taken from ENV in a target-specific manner.
* Unwind information is taken as offsets from the page, to be
* deposited into the "current" PC.
*/
target_ulong pc;
#endif
/*
* Target-specific data associated with the TranslationBlock, e.g.:
@ -545,6 +544,7 @@ struct TranslationBlock {
#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */
#define CF_PCREL 0x00200000 /* Opcodes in TB are PC-relative */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
@ -613,16 +613,6 @@ struct TranslationBlock {
uintptr_t jmp_dest[2];
};
/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
static inline target_ulong tb_pc(const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
qemu_build_not_reached();
#else
return tb->pc;
#endif
}
/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
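
With the tb_pc() accessor deleted, readers follow the rule log_pc() uses:
check CF_PCREL before trusting tb->pc. Condensed into a sketch (the
function name is mine):

    static target_ulong tb_guest_pc(CPUState *cpu, const TranslationBlock *tb)
    {
        /* A CF_PCREL TB has no fixed virtual pc; recover it from the CPU. */
        if (tb_cflags(tb) & CF_PCREL) {
            return cpu->cc->get_pc(cpu);
        }
        return tb->pc;
    }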


@ -9,22 +9,14 @@ static TCGOp *icount_start_insn;
static inline void gen_io_start(void)
{
TCGv_i32 tmp = tcg_const_i32(1);
tcg_gen_st_i32(tmp, cpu_env,
tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
offsetof(ArchCPU, parent_obj.can_do_io) -
offsetof(ArchCPU, env));
tcg_temp_free_i32(tmp);
}
static inline void gen_tb_start(const TranslationBlock *tb)
{
TCGv_i32 count;
if (tb_cflags(tb) & CF_USE_ICOUNT) {
count = tcg_temp_local_new_i32();
} else {
count = tcg_temp_new_i32();
}
TCGv_i32 count = tcg_temp_new_i32();
tcg_gen_ld_i32(count, cpu_env,
offsetof(ArchCPU, neg.icount_decr.u32) -


@ -18,6 +18,8 @@
#ifndef EXEC_HELPER_HEAD_H
#define EXEC_HELPER_HEAD_H
#include "fpu/softfloat-types.h"
#define HELPER(name) glue(helper_, name)
/* Some types that make sense in C, but not for TCG. */


@ -37,7 +37,7 @@
* This function must be provided by the target, which should create
* the target-specific DisasContext, and then invoke translator_loop.
*/
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc);
/**
@ -146,7 +146,7 @@ typedef struct TranslatorOps {
* - When single-stepping is enabled (system-wide or on the current vCPU).
* - When too many instructions have been translated.
*/
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc,
const TranslatorOps *ops, DisasContextBase *db);


@ -828,14 +828,12 @@ static inline void tcg_gen_plugin_cb_end(void)
#if TARGET_LONG_BITS == 32
#define tcg_temp_new() tcg_temp_new_i32()
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
#define tcg_temp_new() tcg_temp_new_i64()
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
@ -1285,6 +1283,11 @@ static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s)
glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s);
}
static inline void tcg_gen_movi_ptr(TCGv_ptr d, intptr_t s)
{
glue(tcg_gen_movi_,PTR)((NAT)d, s);
}
static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a,
intptr_t b, TCGLabel *label)
{
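
The new tcg_gen_movi_ptr helper supports the rewritten plugin-gen code
above; elsewhere the series goes further and replaces the whole
allocate/store/free triple with a hashed constant. Before and after, as a
sketch (offset is a placeholder):

    /* Before: mutable temporary, separate free required. */
    static void store_null_old(intptr_t offset)
    {
        TCGv_ptr p = tcg_const_ptr(NULL);
        tcg_gen_st_ptr(p, cpu_env, offset);
        tcg_temp_free_ptr(p);
    }

    /* After: shared TEMP_CONST value; nothing to free, never written. */
    static void store_null_new(intptr_t offset)
    {
        tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env, offset);
    }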


@ -431,13 +431,15 @@ typedef enum TCGTempVal {
} TCGTempVal;
typedef enum TCGTempKind {
/* Temp is dead at the end of all basic blocks. */
TEMP_NORMAL,
/* Temp is live across conditional branch, but dead otherwise. */
/*
* Temp is dead at the end of the extended basic block (EBB),
* the single-entry multiple-exit region that falls through
* conditional branches.
*/
TEMP_EBB,
/* Temp is saved across basic blocks but dead at the end of TBs. */
TEMP_LOCAL,
/* Temp is saved across both basic blocks and translation blocks. */
/* Temp is live across the entire translation block, but dead at end. */
TEMP_TB,
/* Temp is live across the entire translation block, and between them. */
TEMP_GLOBAL,
/* Temp is in a fixed register. */
TEMP_FIXED,
@ -610,7 +612,7 @@ struct TCGContext {
#endif
GHashTable *const_table[TCG_TYPE_COUNT];
TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
TCGTempSet free_temps[TCG_TYPE_COUNT];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
QTAILQ_HEAD(, TCGOp) ops, free_ops;
@ -853,7 +855,7 @@ void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
TCGTemp *tcg_temp_new_internal(TCGType, TCGTempKind);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
@ -890,15 +892,16 @@ static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
return temp_tcgv_i32(t);
}
static inline TCGv_i32 tcg_temp_new_i32(void)
/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
static inline TCGv_i32 tcg_temp_ebb_new_i32(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB);
return temp_tcgv_i32(t);
}
static inline TCGv_i32 tcg_temp_local_new_i32(void)
static inline TCGv_i32 tcg_temp_new_i32(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB);
return temp_tcgv_i32(t);
}
@ -909,27 +912,29 @@ static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
return temp_tcgv_i64(t);
}
static inline TCGv_i64 tcg_temp_new_i64(void)
/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
static inline TCGv_i64 tcg_temp_ebb_new_i64(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB);
return temp_tcgv_i64(t);
}
static inline TCGv_i64 tcg_temp_local_new_i64(void)
static inline TCGv_i64 tcg_temp_new_i64(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB);
return temp_tcgv_i64(t);
}
/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
static inline TCGv_i128 tcg_temp_ebb_new_i128(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB);
return temp_tcgv_i128(t);
}
static inline TCGv_i128 tcg_temp_new_i128(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, false);
return temp_tcgv_i128(t);
}
static inline TCGv_i128 tcg_temp_local_new_i128(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, true);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB);
return temp_tcgv_i128(t);
}
@ -940,15 +945,16 @@ static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
return temp_tcgv_ptr(t);
}
static inline TCGv_ptr tcg_temp_new_ptr(void)
/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
static inline TCGv_ptr tcg_temp_ebb_new_ptr(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB);
return temp_tcgv_ptr(t);
}
static inline TCGv_ptr tcg_temp_local_new_ptr(void)
static inline TCGv_ptr tcg_temp_new_ptr(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB);
return temp_tcgv_ptr(t);
}
@ -1054,8 +1060,6 @@ void tcg_optimize(TCGContext *s);
/* Allocate a new temporary and initialize it with a constant. */
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
@ -1083,11 +1087,9 @@ TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i64((intptr_t)(x)))
#endif
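
For reference, the allocator-to-kind mapping once this series lands, read
off the inline functions above (the halved free_temps array reflects the
dropped local/non-local split):

    /* Allocation entry points and the TCGTempKind they now produce:
     *   tcg_temp_new_i32/_i64/_i128/_ptr      -> TEMP_TB
     *   tcg_temp_ebb_new_i32/_i64/_i128/_ptr  -> TEMP_EBB (tcg internal use)
     *   tcg_global_mem_new_*                  -> TEMP_GLOBAL
     *   tcg_constant_*                        -> TEMP_CONST
     */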


@ -37,7 +37,7 @@ ssize_t softmmu_strlen_user(CPUArchState *env, target_ulong addr)
/* Find the number of bytes remaining in the page. */
left_in_page = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
flags = probe_access_flags(env, addr, MMU_DATA_LOAD,
flags = probe_access_flags(env, addr, 0, MMU_DATA_LOAD,
mmu_idx, true, &h, 0);
if (flags & TLB_INVALID_MASK) {
return -1;


@ -2637,7 +2637,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
} else {
/* RAM case */
ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
memcpy(ram_ptr, buf, l);
memmove(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
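
A one-line but meaningful fix: if the caller's source buffer overlaps the
destination RAM (guest-initiated RAM-to-RAM copies can arrange this),
memcpy is undefined behavior while memmove is specified for overlap. A
self-contained illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[8] = "abcdefg";
        /* Overlapping source/destination: memcpy is UB here. */
        memmove(buf + 1, buf, 6);
        printf("%s\n", buf);        /* prints "aabcdef" */
        return 0;
    }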


@ -3043,7 +3043,7 @@ static const TranslatorOps alpha_tr_ops = {
.disas_log = alpha_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -31,8 +31,6 @@
# define TARGET_PAGE_BITS_VARY
# define TARGET_PAGE_BITS_MIN 10
# define TARGET_TB_PCREL 1
/*
* Cache the attrs and shareability fields from the page table entry.
*


@ -78,17 +78,17 @@ static vaddr arm_cpu_get_pc(CPUState *cs)
void arm_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
/* The program counter is always up to date with TARGET_TB_PCREL. */
if (!TARGET_TB_PCREL) {
/* The program counter is always up to date with CF_PCREL. */
if (!(tb_cflags(tb) & CF_PCREL)) {
CPUARMState *env = cs->env_ptr;
/*
* It's OK to look at env for the current mode here, because it's
* never possible for an AArch64 TB to chain to an AArch32 TB.
*/
if (is_a64(env)) {
env->pc = tb_pc(tb);
env->pc = tb->pc;
} else {
env->regs[15] = tb_pc(tb);
env->regs[15] = tb->pc;
}
}
}
@ -100,7 +100,7 @@ void arm_restore_state_to_opc(CPUState *cs,
CPUARMState *env = cs->env_ptr;
if (is_a64(env)) {
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
} else {
env->pc = data[0];
@ -108,7 +108,7 @@ void arm_restore_state_to_opc(CPUState *cs,
env->condexec_bits = 0;
env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
} else {
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
} else {
env->regs[15] = data[0];
@ -1557,6 +1557,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
Error *local_err = NULL;
bool no_aa32 = false;
/* Use pc-relative instructions in system-mode */
#ifndef CONFIG_USER_ONLY
cs->tcg_cflags |= CF_PCREL;
#endif
/* If we needed to query the host kernel for the CPU features
* then it's possible that might have failed in the initfn, but
* this is the first point where we can report it.
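
Note how the feature is enabled: per-CPU at realize time instead of the
old build-time TARGET_TB_PCREL define. Another target opting in would do
the same (sketch; 'mytarget' is hypothetical):

    static void mytarget_cpu_realizefn(DeviceState *dev, Error **errp)
    {
        CPUState *cs = CPU(dev);

    #ifndef CONFIG_USER_ONLY
        /* Use pc-relative code generation in system mode, as Arm does. */
        cs->tcg_cflags |= CF_PCREL;
    #endif
        /* ... the rest of the target's realize hook ... */
    }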


@ -259,7 +259,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
int flags;
env->tlb_fi = fi;
flags = probe_access_full(env, addr, MMU_DATA_LOAD,
flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
arm_to_core_mmu_idx(s2_mmu_idx),
true, &ptw->out_host, &full, 0);
env->tlb_fi = NULL;
@ -411,7 +411,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
void *discard;
env->tlb_fi = fi;
flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE,
flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
arm_to_core_mmu_idx(ptw->in_ptw_idx),
true, &discard, 0);
env->tlb_fi = NULL;


@ -118,7 +118,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
* valid. Indicate to probe_access_flags no-fault, then assert that
* we received a valid page.
*/
flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
ra == 0, &host, &full, ra);
assert(!(flags & TLB_INVALID_MASK));
@ -154,7 +154,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
*/
in_page = -(ptr | TARGET_PAGE_MASK);
if (unlikely(ptr_size > in_page)) {
flags |= probe_access_full(env, ptr + in_page, ptr_access,
flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
ptr_mmu_idx, ra == 0, &host, &full, ra);
assert(!(flags & TLB_INVALID_MASK));
}


@ -5352,11 +5352,11 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
addr = useronly_clean_ptr(addr);
#ifdef CONFIG_USER_ONLY
flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, nofault,
&info->host, retaddr);
#else
CPUTLBEntryFull *full;
flags = probe_access_full(env, addr, access_type, mmu_idx, nofault,
flags = probe_access_full(env, addr, 0, access_type, mmu_idx, nofault,
&info->host, &full, retaddr);
#endif
info->flags = flags;


@ -143,7 +143,7 @@ static void reset_btype(DisasContext *s)
static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
} else {
tcg_gen_movi_i64(dest, s->pc_curr + diff);
@ -393,7 +393,7 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
* update to pc to the unlinked path. A long chain of links
* can thus avoid many updates to the PC.
*/
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
gen_a64_update_pc(s, diff);
tcg_gen_goto_tb(n);
} else {
@ -436,12 +436,6 @@ TCGv_i64 new_tmp_a64(DisasContext *s)
return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
TCGv_i64 new_tmp_a64_local(DisasContext *s)
{
assert(s->tmp_a64_count < TMP_A64_MAX);
return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
}
TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
TCGv_i64 t = new_tmp_a64(s);
@ -4297,7 +4291,7 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
if (page) {
/* ADRP (page based) */
offset <<= 12;
/* The page offset is ok for TARGET_TB_PCREL. */
/* The page offset is ok for CF_PCREL. */
offset -= s->pc_curr & 0xfff;
}
@ -14651,7 +14645,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
* that the TLB entry must be present and valid, and thus this
* access will never raise an exception.
*/
flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
false, &host, &full, 0);
assert(!(flags & TLB_INVALID_MASK));
@ -14809,7 +14803,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
DisasContext *dc = container_of(dcbase, DisasContext, base);
target_ulong pc_arg = dc->base.pc_next;
if (TARGET_TB_PCREL) {
if (tb_cflags(dcbase->tb) & CF_PCREL) {
pc_arg &= ~TARGET_PAGE_MASK;
}
tcg_gen_insn_start(pc_arg, 0, 0);


@ -19,7 +19,6 @@
#define TARGET_ARM_TRANSLATE_A64_H
TCGv_i64 new_tmp_a64(DisasContext *s);
TCGv_i64 new_tmp_a64_local(DisasContext *s);
TCGv_i64 new_tmp_a64_zero(DisasContext *s);
TCGv_i64 cpu_reg(DisasContext *s, int reg);
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg);


@ -2694,7 +2694,7 @@ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
return true;
}
last = tcg_temp_local_new_i32();
last = tcg_temp_new_i32();
over = gen_new_label();
find_last_active(s, last, esz, a->pg);
@ -4342,18 +4342,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
tcg_temp_free_i64(t0);
} else {
TCGLabel *loop = gen_new_label();
TCGv_ptr tp, i = tcg_const_local_ptr(0);
/* Copy the clean address into a local temp, live across the loop. */
t0 = clean_addr;
clean_addr = new_tmp_a64_local(s);
tcg_gen_mov_i64(clean_addr, t0);
if (base != cpu_env) {
TCGv_ptr b = tcg_temp_local_new_ptr();
tcg_gen_mov_ptr(b, base);
base = b;
}
TCGv_ptr tp, i = tcg_const_ptr(0);
gen_set_label(loop);
@ -4370,11 +4359,6 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
tcg_temp_free_ptr(i);
if (base != cpu_env) {
tcg_temp_free_ptr(base);
assert(len_remain == 0);
}
}
/*
@ -4443,18 +4427,7 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
tcg_temp_free_i64(t0);
} else {
TCGLabel *loop = gen_new_label();
TCGv_ptr tp, i = tcg_const_local_ptr(0);
/* Copy the clean address into a local temp, live across the loop. */
t0 = clean_addr;
clean_addr = new_tmp_a64_local(s);
tcg_gen_mov_i64(clean_addr, t0);
if (base != cpu_env) {
TCGv_ptr b = tcg_temp_local_new_ptr();
tcg_gen_mov_ptr(b, base);
base = b;
}
TCGv_ptr tp, i = tcg_const_ptr(0);
gen_set_label(loop);
@ -4471,11 +4444,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
tcg_temp_free_ptr(i);
if (base != cpu_env) {
tcg_temp_free_ptr(base);
assert(len_remain == 0);
}
}
/* Predicate register stores can be any multiple of 2. */


@ -269,7 +269,7 @@ static target_long jmp_diff(DisasContext *s, target_long diff)
static void gen_pc_plus_diff(DisasContext *s, TCGv_i32 var, target_long diff)
{
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff);
} else {
tcg_gen_movi_i32(var, s->pc_curr + diff);
@ -2620,7 +2620,7 @@ static void gen_goto_tb(DisasContext *s, int n, target_long diff)
* update to pc to the unlinked path. A long chain of links
* can thus avoid many updates to the PC.
*/
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
gen_update_pc(s, diff);
tcg_gen_goto_tb(n);
} else {
@ -7136,7 +7136,7 @@ static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
addr = tcg_temp_local_new_i32();
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
tcg_gen_addi_i32(addr, addr, a->imm);
@ -7289,7 +7289,7 @@ static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
return true;
}
addr = tcg_temp_local_new_i32();
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
tcg_gen_addi_i32(addr, addr, a->imm);
@ -8696,7 +8696,7 @@ static bool trans_LE(DisasContext *s, arg_LE *a)
* Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local
* so that decr stays live after the brcondi.
*/
TCGv_i32 decr = tcg_temp_local_new_i32();
TCGv_i32 decr = tcg_temp_new_i32();
TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize);
tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize);
tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
@ -9542,7 +9542,7 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
uint32_t condexec_bits;
target_ulong pc_arg = dc->base.pc_next;
if (TARGET_TB_PCREL) {
if (tb_cflags(dcbase->tb) & CF_PCREL) {
pc_arg &= ~TARGET_PAGE_MASK;
}
if (dc->eci) {
@ -9970,7 +9970,7 @@ static const TranslatorOps thumb_translator_ops = {
};
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = { };


@ -23,7 +23,7 @@ typedef struct DisasContext {
/* The address of the current instruction being translated. */
target_ulong pc_curr;
/*
* For TARGET_TB_PCREL, the full value of cpu_pc is not known
* For CF_PCREL, the full value of cpu_pc is not known
* (although the page offset is known). For convenience, the
* translation loop uses the full virtual address that triggered
* the translation, from base.pc_start through pc_curr.


@ -54,7 +54,8 @@ static void avr_cpu_synchronize_from_tb(CPUState *cs,
AVRCPU *cpu = AVR_CPU(cs);
CPUAVRState *env = &cpu->env;
env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
env->pc_w = tb->pc / 2; /* internally PC points to words */
}
static void avr_restore_state_to_opc(CPUState *cs,


@ -3049,7 +3049,7 @@ static const TranslatorOps avr_tr_ops = {
.disas_log = avr_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = { };


@ -1621,7 +1621,7 @@ static int dec_bound_r(CPUCRISState *env, DisasContext *dc)
LOG_DIS("bound.%c $r%u, $r%u\n",
memsize_char(size), dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
l0 = tcg_temp_local_new();
l0 = tcg_temp_new();
dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
tcg_temp_free(l0);
@ -2404,8 +2404,8 @@ static int dec_bound_m(CPUCRISState *env, DisasContext *dc)
dc->op1, dc->postinc ? "+]" : "]",
dc->op2);
l[0] = tcg_temp_local_new();
l[1] = tcg_temp_local_new();
l[0] = tcg_temp_new();
l[1] = tcg_temp_new();
insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]);
cris_cc_mask(dc, CC_MASK_NZ);
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
@ -3286,7 +3286,7 @@ static const TranslatorOps cris_tr_ops = {
.disas_log = cris_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -68,9 +68,9 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
unsigned int size, int mem_index)
{
TCGLabel *l1 = gen_new_label();
TCGv taddr = tcg_temp_local_new();
TCGv tval = tcg_temp_local_new();
TCGv t1 = tcg_temp_local_new();
TCGv taddr = tcg_temp_new();
TCGv tval = tcg_temp_new();
TCGv t1 = tcg_temp_new();
dc->postinc = 0;
cris_evaluate_flags(dc);
@ -434,7 +434,7 @@ static void dec10_reg_bound(DisasContext *dc, int size)
{
TCGv t;
t = tcg_temp_local_new();
t = tcg_temp_new();
t_gen_zext(t, cpu_R[dc->src], size);
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
tcg_temp_free(t);
@ -935,7 +935,7 @@ static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc,
int rd = dc->dst;
TCGv t;
t = tcg_temp_local_new();
t = tcg_temp_new();
insn_len += dec10_prep_move_m(env, dc, 0, size, t);
cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4);
if (dc->dst == 15) {


@ -81,7 +81,7 @@ tcg_funcs_generated.c.inc
Insn *insn,
Packet *pkt)
{
TCGv RdV = tcg_temp_local_new();
TCGv RdV = tcg_temp_new();
const int RdN = insn->regno[0];
TCGv RsV = hex_gpr[insn->regno[1]];
TCGv RtV = hex_gpr[insn->regno[2]];
@ -146,16 +146,16 @@ istruction.
const int VdN = insn->regno[0];
const intptr_t VdV_off =
ctx_future_vreg_off(ctx, VdN, 1, true);
TCGv_ptr VdV = tcg_temp_local_new_ptr();
TCGv_ptr VdV = tcg_temp_new_ptr();
tcg_gen_addi_ptr(VdV, cpu_env, VdV_off);
const int VuN = insn->regno[1];
const intptr_t VuV_off =
vreg_src_off(ctx, VuN);
TCGv_ptr VuV = tcg_temp_local_new_ptr();
TCGv_ptr VuV = tcg_temp_new_ptr();
const int VvN = insn->regno[2];
const intptr_t VvV_off =
vreg_src_off(ctx, VvN);
TCGv_ptr VvV = tcg_temp_local_new_ptr();
TCGv_ptr VvV = tcg_temp_new_ptr();
tcg_gen_addi_ptr(VuV, cpu_env, VuV_off);
tcg_gen_addi_ptr(VvV, cpu_env, VvV_off);
TCGv slot = tcg_constant_tl(insn->slot);


@ -23,6 +23,7 @@
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
static void hexagon_v67_cpu_init(Object *obj)
{
@ -263,7 +264,8 @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
{
HexagonCPU *cpu = HEXAGON_CPU(cs);
CPUHexagonState *env = &cpu->env;
env->gpr[HEX_REG_PC] = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
env->gpr[HEX_REG_PC] = tb->pc;
}
static bool hexagon_cpu_has_work(CPUState *cs)


@ -337,7 +337,7 @@
*/
#define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \
do { \
TCGv LSB = tcg_temp_local_new(); \
TCGv LSB = tcg_temp_new(); \
TCGLabel *label = gen_new_label(); \
tcg_gen_movi_tl(EA, 0); \
PRED; \
@ -397,7 +397,7 @@
/* Predicated loads into a register pair */
#define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \
do { \
TCGv LSB = tcg_temp_local_new(); \
TCGv LSB = tcg_temp_new(); \
TCGLabel *label = gen_new_label(); \
tcg_gen_movi_tl(EA, 0); \
PRED; \


@ -26,18 +26,14 @@ import hex_common
## Helpers for gen_tcg_func
##
def gen_decl_ea_tcg(f, tag):
if ('A_CONDEXEC' in hex_common.attribdict[tag] or
'A_LOAD' in hex_common.attribdict[tag]):
f.write(" TCGv EA = tcg_temp_local_new();\n")
else:
f.write(" TCGv EA = tcg_temp_new();\n")
f.write(" TCGv EA = tcg_temp_new();\n")
def gen_free_ea_tcg(f):
f.write(" tcg_temp_free(EA);\n")
def genptr_decl_pair_writable(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \
(regtype, regid))
if (regtype == "C"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
@ -56,7 +52,7 @@ def genptr_decl_pair_writable(f, tag, regtype, regid, regno):
def genptr_decl_writable(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \
f.write(" TCGv %s%sV = tcg_temp_new();\n" % \
(regtype, regid))
if (regtype == "C"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
@ -73,7 +69,7 @@ def genptr_decl(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
if (regtype == "R"):
if (regid in {"ss", "tt"}):
f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \
(regtype, regid))
f.write(" const int %s = insn->regno[%d];\n" % \
(regN, regno))
@ -96,14 +92,14 @@ def genptr_decl(f, tag, regtype, regid, regno):
print("Bad register parse: ", regtype, regid)
elif (regtype == "C"):
if (regid == "ss"):
f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \
(regtype, regid))
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
elif (regid == "dd"):
genptr_decl_pair_writable(f, tag, regtype, regid, regno)
elif (regid == "s"):
f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \
f.write(" TCGv %s%sV = tcg_temp_new();\n" % \
(regtype, regid))
f.write(" const int %s%sN = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regtype, regid, regno))
@ -575,7 +571,7 @@ def genptr_dst_write_opn(f,regtype, regid, tag):
## We produce:
## static void generate_A2_add(DisasContext *ctx)
## {
## TCGv RdV = tcg_temp_local_new();
## TCGv RdV = tcg_temp_new();
## const int RdN = insn->regno[0];
## TCGv RsV = hex_gpr[insn->regno[1]];
## TCGv RtV = hex_gpr[insn->regno[2]];


@ -706,7 +706,7 @@ static void gen_cond_call(DisasContext *ctx, TCGv pred,
TCGCond cond, int pc_off)
{
TCGv next_PC;
TCGv lsb = tcg_temp_local_new();
TCGv lsb = tcg_temp_new();
TCGLabel *skip = gen_new_label();
tcg_gen_andi_tl(lsb, pred, 1);
gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
@ -720,7 +720,7 @@ static void gen_cond_call(DisasContext *ctx, TCGv pred,
static void gen_endloop0(DisasContext *ctx)
{
TCGv lpcfg = tcg_temp_local_new();
TCGv lpcfg = tcg_temp_new();
GET_USR_FIELD(USR_LPCFG, lpcfg);
@ -852,7 +852,7 @@ static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt)
/* Bidirectional shift right with saturation */
static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
{
TCGv shift_amt = tcg_temp_local_new();
TCGv shift_amt = tcg_temp_new();
TCGLabel *positive = gen_new_label();
TCGLabel *done = gen_new_label();
@ -876,7 +876,7 @@ static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
/* Bidirectional shift left with saturation */
static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
{
TCGv shift_amt = tcg_temp_local_new();
TCGv shift_amt = tcg_temp_new();
TCGLabel *positive = gen_new_label();
TCGLabel *done = gen_new_label();
@ -918,7 +918,7 @@ static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
intptr_t dstoff;
if (is_predicated) {
TCGv cancelled = tcg_temp_local_new();
TCGv cancelled = tcg_temp_new();
label_end = gen_new_label();
/* Don't do anything if the slot was cancelled */
@ -959,7 +959,7 @@ static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
intptr_t dstoff;
if (is_predicated) {
TCGv cancelled = tcg_temp_local_new();
TCGv cancelled = tcg_temp_new();
label_end = gen_new_label();
/* Don't do anything if the slot was cancelled */
@ -1164,10 +1164,10 @@ void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
/* Implements the fADDSAT64 macro in TCG */
void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 sum = tcg_temp_local_new_i64();
TCGv_i64 sum = tcg_temp_new_i64();
TCGv_i64 xor = tcg_temp_new_i64();
TCGv_i64 cond1 = tcg_temp_new_i64();
TCGv_i64 cond2 = tcg_temp_local_new_i64();
TCGv_i64 cond2 = tcg_temp_new_i64();
TCGv_i64 cond3 = tcg_temp_new_i64();
TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);


@ -294,9 +294,9 @@ generators the previous declarations are mapped to
::
int var1; -> TCGv_i32 var1 = tcg_temp_local_new_i32();
int var1; -> TCGv_i32 var1 = tcg_temp_new_i32();
int var2 = 0; -> TCGv_i32 var1 = tcg_temp_local_new_i32();
int var2 = 0; -> TCGv_i32 var1 = tcg_temp_new_i32();
tcg_gen_movi_i32(j, ((int64_t) 0ULL));
which are later automatically freed at the end of the function they're declared


@ -307,26 +307,6 @@ HexValue gen_tmp(Context *c,
return rvalue;
}
HexValue gen_tmp_local(Context *c,
YYLTYPE *locp,
unsigned bit_width,
HexSignedness signedness)
{
HexValue rvalue;
assert(bit_width == 32 || bit_width == 64);
memset(&rvalue, 0, sizeof(HexValue));
rvalue.type = TEMP;
rvalue.bit_width = bit_width;
rvalue.signedness = signedness;
rvalue.is_dotnew = false;
rvalue.is_manual = false;
rvalue.tmp.index = c->inst.tmp_count;
OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count,
" = tcg_temp_local_new_i", &bit_width, "();\n");
c->inst.tmp_count++;
return rvalue;
}
HexValue gen_tmp_value(Context *c,
YYLTYPE *locp,
const char *value,
@ -554,7 +534,7 @@ void gen_varid_allocate(Context *c,
new_var.signedness = signedness;
EMIT_HEAD(c, "TCGv_%s %s", bit_suffix, varid->var.name->str);
EMIT_HEAD(c, " = tcg_temp_local_new_%s();\n", bit_suffix);
EMIT_HEAD(c, " = tcg_temp_new_%s();\n", bit_suffix);
g_array_append_val(c->inst.allocated, new_var);
}
@ -2161,8 +2141,8 @@ HexValue gen_rvalue_sat(Context *c, YYLTYPE *locp, HexSat *sat,
assert_signedness(c, locp, sat->signedness);
unsigned_str = (sat->signedness == UNSIGNED) ? "u" : "";
res = gen_tmp_local(c, locp, value->bit_width, sat->signedness);
ovfl = gen_tmp_local(c, locp, 32, sat->signedness);
res = gen_tmp(c, locp, value->bit_width, sat->signedness);
ovfl = gen_tmp(c, locp, 32, sat->signedness);
OUT(c, locp, "gen_sat", unsigned_str, "_", bit_suffix, "_ovfl(");
OUT(c, locp, &ovfl, ", ", &res, ", ", value, ", ", &width->imm.value,
");\n");

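With gen_tmp_local() removed, gen_tmp() covers every temporary the idef-parser emits; following the OUT() format string above, the generated declaration now reads, for example:

TCGv_i32 tmp_0 = tcg_temp_new_i32();    /* was: tcg_temp_local_new_i32() */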

@ -539,7 +539,7 @@ void process_store(DisasContext *ctx, int slot_num)
tcg_temp_free(cancelled);
}
{
TCGv address = tcg_temp_local_new();
TCGv address = tcg_temp_new();
tcg_gen_mov_tl(address, hex_store_addr[slot_num]);
/*
@ -962,7 +962,7 @@ static const TranslatorOps hexagon_tr_ops = {
.disas_log = hexagon_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -26,7 +26,7 @@
#include "qemu/module.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
@ -48,8 +48,10 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
{
HPPACPU *cpu = HPPA_CPU(cs);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
#ifdef CONFIG_USER_ONLY
cpu->env.iaoq_f = tb_pc(tb);
cpu->env.iaoq_f = tb->pc;
cpu->env.iaoq_b = tb->cs_base;
#else
/* Recover the IAOQ values from the GVA + PRIV. */
@ -59,7 +61,7 @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
int32_t diff = cs_base;
cpu->env.iasq_f = iasq_f;
cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv;
cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
if (diff) {
cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
}


@ -35,7 +35,6 @@
#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
#if TARGET_LONG_BITS == 64
@ -59,7 +58,6 @@
#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64
#define tcg_gen_movi_reg tcg_gen_movi_i64
@ -155,7 +153,6 @@
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32
#define tcg_gen_movi_reg tcg_gen_movi_i32
@ -4359,7 +4356,7 @@ static const TranslatorOps hppa_tr_ops = {
.disas_log = hppa_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -25,8 +25,4 @@
#define TARGET_PAGE_BITS 12
#define NB_MMU_MODES 5
#ifndef CONFIG_USER_ONLY
# define TARGET_TB_PCREL 1
#endif
#endif


@ -6534,6 +6534,11 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
static bool ht_warned;
unsigned requested_lbr_fmt;
/* Use pc-relative instructions in system-mode */
#ifndef CONFIG_USER_ONLY
cs->tcg_cflags |= CF_PCREL;
#endif
if (cpu->apic_id == UNASSIGNED_APIC_ID) {
error_setg(errp, "apic-id property was not initialized properly");
return;

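The build-time TARGET_TB_PCREL define removed from cpu-param.h above becomes this per-vCPU CF_PCREL bit, set once at realize time, and every former compile-time test in the translator becomes a runtime check of the TB's cflags. A minimal sketch of the resulting pattern, mirroring the gen_update_eip_* hunks below (the demo_* name is hypothetical):

/* Emit an update of the pc register, honouring CF_PCREL. */
static void demo_update_pc(DisasContext *s, target_ulong dest)
{
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* only a delta from the last synced pc may be emitted */
        tcg_gen_addi_tl(cpu_eip, cpu_eip, dest - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, dest - s->cs_base);
    }
}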

@ -520,7 +520,7 @@ static inline target_ulong get_memio_eip(CPUX86State *env)
}
/* Per x86_restore_state_to_opc. */
if (TARGET_TB_PCREL) {
if (cs->tcg_cflags & CF_PCREL) {
return (env->eip & TARGET_PAGE_MASK) | data[0];
} else {
return data[0] - env->segs[R_CS].base;


@ -64,7 +64,7 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr)
int flags;
inout->gaddr = addr;
flags = probe_access_full(inout->env, addr, MMU_DATA_STORE,
flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE,
inout->ptw_idx, true, &inout->haddr, &full, 0);
if (unlikely(flags & TLB_INVALID_MASK)) {
@ -428,7 +428,7 @@ do_check_protect_pse36:
CPUTLBEntryFull *full;
int flags, nested_page_size;
flags = probe_access_full(env, paddr, access_type,
flags = probe_access_full(env, paddr, 0, access_type,
MMU_NESTED_IDX, true,
&pte_trans.haddr, &full, 0);
if (unlikely(flags & TLB_INVALID_MASK)) {

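Both hunks above show the widened probe_access_full() from this series: a size operand now follows the address, with existing callers passing 0 to keep the old probe-without-length behaviour. A minimal caller sketch for the sibling probe_access_flags(), assuming a target helper context (the demo_* name is hypothetical):

/* Nonfault probe: returns 0 if the page is mapped, -1 otherwise. */
static int demo_probe(CPUArchState *env, target_ulong addr,
                      int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags = probe_access_flags(env, addr, 0 /* size */, MMU_DATA_LOAD,
                                   mmu_idx, true /* nonfault */,
                                   &host, retaddr);
    if (flags & TLB_INVALID_MASK) {
        /* the page is unmapped; no exception is raised when nonfault */
        return -1;
    }
    return 0;
}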

@ -49,10 +49,10 @@ static void x86_cpu_exec_exit(CPUState *cs)
static void x86_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
/* The instruction pointer is always up to date with TARGET_TB_PCREL. */
if (!TARGET_TB_PCREL) {
/* The instruction pointer is always up to date with CF_PCREL. */
if (!(tb_cflags(tb) & CF_PCREL)) {
CPUX86State *env = cs->env_ptr;
env->eip = tb_pc(tb) - tb->cs_base;
env->eip = tb->pc - tb->cs_base;
}
}
@ -64,7 +64,7 @@ static void x86_restore_state_to_opc(CPUState *cs,
CPUX86State *env = &cpu->env;
int cc_op = data[1];
if (TARGET_TB_PCREL) {
if (tb_cflags(tb) & CF_PCREL) {
env->eip = (env->eip & TARGET_PAGE_MASK) | data[0];
} else {
env->eip = data[0] - tb->cs_base;


@ -545,7 +545,7 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
static void gen_update_eip_cur(DisasContext *s)
{
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
} else {
tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
@ -556,7 +556,7 @@ static void gen_update_eip_cur(DisasContext *s)
static void gen_update_eip_next(DisasContext *s)
{
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
} else {
tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
@ -588,7 +588,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s)
if (CODE64(s)) {
return tcg_constant_i32(-1);
}
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
TCGv_i32 ret = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(ret, cpu_eip);
tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
@ -601,7 +601,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s)
static TCGv eip_next_tl(DisasContext *s)
{
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
TCGv ret = tcg_temp_new();
tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
return ret;
@ -613,7 +613,7 @@ static TCGv eip_next_tl(DisasContext *s)
static TCGv eip_cur_tl(DisasContext *s)
{
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
TCGv ret = tcg_temp_new();
tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
return ret;
@ -1830,7 +1830,7 @@ static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
/* The CC_OP value is no longer predictable. */
/* The CC_OP value is no longer predictable. */
set_cc_op(s, CC_OP_DYNAMIC);
}
@ -1923,7 +1923,7 @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
gen_op_ld_v(s, ot, s->T0, s->A0);
else
gen_op_mov_v_reg(s, ot, s->T0, op1);
if (is_right) {
switch (ot) {
case MO_8:
@ -2319,7 +2319,7 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
ea = cpu_regs[a.base];
}
if (!ea) {
if (TARGET_TB_PCREL && a.base == -2) {
if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
/* With cpu_eip ~= pc_save, the expression is pc-relative. */
tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
} else {
@ -2867,7 +2867,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
if (!CODE64(s)) {
if (ot == MO_16) {
mask = 0xffff;
if (TARGET_TB_PCREL && CODE32(s)) {
if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
use_goto_tb = false;
}
} else {
@ -2879,7 +2879,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
gen_update_cc_op(s);
set_cc_op(s, CC_OP_DYNAMIC);
if (TARGET_TB_PCREL) {
if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
/*
* If we can prove the branch does not leave the page and we have
@ -2896,13 +2896,13 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
if (!TARGET_TB_PCREL) {
if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
tcg_gen_movi_tl(cpu_eip, new_eip);
}
tcg_gen_exit_tb(s->base.tb, tb_num);
s->base.is_jmp = DISAS_NORETURN;
} else {
if (!TARGET_TB_PCREL) {
if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
tcg_gen_movi_tl(cpu_eip, new_eip);
}
if (s->jmp_opt) {
@ -3426,13 +3426,10 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
if (mod == 3) {
goto illegal_op;
}
a0 = tcg_temp_local_new();
t0 = tcg_temp_local_new();
a0 = s->A0;
t0 = s->T0;
label1 = gen_new_label();
tcg_gen_mov_tl(a0, s->A0);
tcg_gen_mov_tl(t0, s->T0);
gen_set_label(label1);
t1 = tcg_temp_new();
t2 = tcg_temp_new();
@ -3444,9 +3441,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
tcg_temp_free(t2);
tcg_temp_free(a0);
tcg_gen_neg_tl(s->T0, t0);
tcg_temp_free(t0);
} else {
tcg_gen_neg_tl(s->T0, s->T0);
if (mod != 3) {
@ -6248,13 +6243,13 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
#endif
{
TCGLabel *label1;
TCGv t0, t1, t2, a0;
TCGv t0, t1, t2;
if (!PE(s) || VM86(s))
goto illegal_op;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
t0 = tcg_temp_new();
t1 = tcg_temp_new();
t2 = tcg_temp_new();
ot = MO_16;
modrm = x86_ldub_code(env, s);
reg = (modrm >> 3) & 7;
@ -6263,11 +6258,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, ot, t0, s->A0);
a0 = tcg_temp_local_new();
tcg_gen_mov_tl(a0, s->A0);
} else {
gen_op_mov_v_reg(s, ot, t0, rm);
a0 = NULL;
}
gen_op_mov_v_reg(s, ot, t1, reg);
tcg_gen_andi_tl(s->tmp0, t0, 3);
@ -6280,8 +6272,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_movi_tl(t2, CC_Z);
gen_set_label(label1);
if (mod != 3) {
gen_op_st_v(s, ot, t0, a0);
tcg_temp_free(a0);
gen_op_st_v(s, ot, t0, s->A0);
} else {
gen_op_mov_reg_v(s, ot, rm, t0);
}
@ -6304,7 +6295,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
modrm = x86_ldub_code(env, s);
reg = ((modrm >> 3) & 7) | REX_R(s);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
t0 = tcg_temp_local_new();
t0 = tcg_temp_new();
gen_update_cc_op(s);
if (b == 0x102) {
gen_helper_lar(t0, cpu_env, s->T0);
@ -7052,7 +7043,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
dc->tmp2_i32 = tcg_temp_new_i32();
dc->tmp3_i32 = tcg_temp_new_i32();
dc->tmp4 = tcg_temp_new();
dc->cc_srcT = tcg_temp_local_new();
dc->cc_srcT = tcg_temp_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
@ -7065,7 +7056,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
target_ulong pc_arg = dc->base.pc_next;
dc->prev_insn_end = tcg_last_op();
if (TARGET_TB_PCREL) {
if (tb_cflags(dcbase->tb) & CF_PCREL) {
pc_arg -= dc->cs_base;
pc_arg &= ~TARGET_PAGE_MASK;
}
@ -7158,7 +7149,7 @@ static const TranslatorOps i386_tr_ops = {
};
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;

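The gen_intermediate_code() signature change above, repeated for every target below, turns max_insns into an in/out parameter, paving the way for the translator loop to report back the instruction count it actually used. A hedged caller sketch (the budget value is illustrative):

int insn_budget = 512;                  /* hypothetical initial budget */
gen_intermediate_code(cpu, tb, &insn_budget, pc, host_pc);
/* insn_budget may now reflect what translation actually consumed */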

@ -18,6 +18,7 @@
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#include "sysemu/reset.h"
#include "tcg/tcg.h"
const char * const regnames[32] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
@ -321,7 +322,8 @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
LoongArchCPU *cpu = LOONGARCH_CPU(cs);
CPULoongArchState *env = &cpu->env;
env->pc = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
env->pc = tb->pc;
}
static void loongarch_restore_state_to_opc(CPUState *cs,
@ -599,7 +601,7 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
oc = object_class_by_name(cpu_model);
if (!oc) {
g_autofree char *typename
g_autofree char *typename
= g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
oc = object_class_by_name(typename);
if (!oc) {


@ -245,7 +245,7 @@ static const TranslatorOps loongarch_tr_ops = {
.disas_log = loongarch_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -6393,7 +6393,7 @@ static const TranslatorOps m68k_tr_ops = {
.disas_log = m68k_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -30,6 +30,7 @@
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
static const struct {
const char *name;
@ -97,7 +98,8 @@ static void mb_cpu_synchronize_from_tb(CPUState *cs,
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
cpu->env.pc = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
cpu->env.pc = tb->pc;
cpu->env.iflags = tb->flags & IFLAGS_TB_MASK;
}


@ -1849,7 +1849,7 @@ static const TranslatorOps mb_tr_ops = {
.disas_log = mb_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -82,7 +82,8 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
env->active_tc.PC = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
env->active_tc.PC = tb->pc;
env->hflags &= ~MIPS_HFLAG_BMASK;
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}


@ -1017,8 +1017,8 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
uint32_t reg1, uint32_t reg2, bool eva)
{
TCGv taddr = tcg_temp_local_new();
TCGv lladdr = tcg_temp_local_new();
TCGv taddr = tcg_temp_new();
TCGv lladdr = tcg_temp_new();
TCGv_i64 tval = tcg_temp_new_i64();
TCGv_i64 llval = tcg_temp_new_i64();
TCGv_i64 val = tcg_temp_new_i64();


@ -94,7 +94,7 @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
CPUMIPSState *env = &cpu->env;
if ((env->hflags & MIPS_HFLAG_BMASK) != 0
&& env->active_tc.PC != tb_pc(tb)) {
&& !(cs->tcg_cflags & CF_PCREL) && env->active_tc.PC != tb->pc) {
env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
env->hflags &= ~MIPS_HFLAG_BMASK;
return true;


@ -2400,7 +2400,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
switch (opc) {
case OPC_ADDI:
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGLabel *l1 = gen_new_label();
@ -2434,7 +2434,7 @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
#if defined(TARGET_MIPS64)
case OPC_DADDI:
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGLabel *l1 = gen_new_label();
@ -2630,7 +2630,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
switch (opc) {
case OPC_ADD:
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGLabel *l1 = gen_new_label();
@ -2666,7 +2666,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
break;
case OPC_SUB:
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGLabel *l1 = gen_new_label();
@ -2707,7 +2707,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
#if defined(TARGET_MIPS64)
case OPC_DADD:
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGLabel *l1 = gen_new_label();
@ -2741,7 +2741,7 @@ static void gen_arith(DisasContext *ctx, uint32_t opc,
break;
case OPC_DSUB:
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
TCGLabel *l1 = gen_new_label();
@ -3759,26 +3759,8 @@ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc,
return;
}
switch (opc) {
case OPC_MULT_G_2E:
case OPC_MULT_G_2F:
case OPC_MULTU_G_2E:
case OPC_MULTU_G_2F:
#if defined(TARGET_MIPS64)
case OPC_DMULT_G_2E:
case OPC_DMULT_G_2F:
case OPC_DMULTU_G_2E:
case OPC_DMULTU_G_2F:
#endif
t0 = tcg_temp_new();
t1 = tcg_temp_new();
break;
default:
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
break;
}
t0 = tcg_temp_new();
t1 = tcg_temp_new();
gen_load_gpr(t0, rs);
gen_load_gpr(t1, rt);
@ -3955,21 +3937,10 @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt)
TCGCond cond;
opc = MASK_LMMI(ctx->opcode);
switch (opc) {
case OPC_ADD_CP2:
case OPC_SUB_CP2:
case OPC_DADD_CP2:
case OPC_DSUB_CP2:
t0 = tcg_temp_local_new_i64();
t1 = tcg_temp_local_new_i64();
break;
default:
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
break;
}
check_cp1_enabled(ctx);
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, t0, rs);
gen_load_fpr64(ctx, t1, rt);
@ -8650,7 +8621,7 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
int u, int sel, int h)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) !=
@ -8878,7 +8849,7 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt,
int u, int sel, int h)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
@ -11409,7 +11380,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
case OPC_ALNV_PS:
check_ps(ctx);
{
TCGv t0 = tcg_temp_local_new();
TCGv t0 = tcg_temp_new();
TCGv_i32 fp = tcg_temp_new_i32();
TCGv_i32 fph = tcg_temp_new_i32();
TCGLabel *l1 = gen_new_label();
@ -16159,7 +16130,7 @@ static const TranslatorOps mips_tr_ops = {
.disas_log = mips_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -1037,7 +1037,7 @@ static const TranslatorOps nios2_tr_ops = {
.disas_log = nios2_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -22,6 +22,7 @@
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
{
@ -43,7 +44,8 @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
cpu->env.pc = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
cpu->env.pc = tb->pc;
}
static void openrisc_restore_state_to_opc(CPUState *cs,


@ -1705,7 +1705,7 @@ static const TranslatorOps openrisc_tr_ops = {
.disas_log = openrisc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -4415,7 +4415,7 @@ static void gen_bcond(DisasContext *ctx, int type)
TCGv target;
if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
target = tcg_temp_local_new();
target = tcg_temp_new();
if (type == BCOND_CTR) {
tcg_gen_mov_tl(target, cpu_ctr);
} else if (type == BCOND_TAR) {
@ -5594,8 +5594,8 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
{
TCGv t0, t1;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
t0 = tcg_temp_new();
t1 = tcg_temp_new();
switch (opc3 & 0x0D) {
case 0x05:
@ -7707,7 +7707,7 @@ static const TranslatorOps ppc_tr_ops = {
.disas_log = ppc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -168,7 +168,7 @@ static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
TCGv_i32 t0 = tcg_temp_local_new_i32();
TCGv_i32 t0 = tcg_temp_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
@ -185,7 +185,7 @@ static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
TCGv_i32 t0 = tcg_temp_local_new_i32();
TCGv_i32 t0 = tcg_temp_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
@ -202,7 +202,7 @@ static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
TCGv_i32 t0 = tcg_temp_local_new_i32();
TCGv_i32 t0 = tcg_temp_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
@ -378,7 +378,7 @@ static inline void gen_evsel(DisasContext *ctx)
TCGLabel *l2 = gen_new_label();
TCGLabel *l3 = gen_new_label();
TCGLabel *l4 = gen_new_label();
TCGv_i32 t0 = tcg_temp_local_new_i32();
TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);


@ -1508,8 +1508,8 @@ static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VECTOR(ctx);
vra = tcg_temp_local_new_i64();
vrb = tcg_temp_local_new_i64();
vra = tcg_temp_new_i64();
vrb = tcg_temp_new_i64();
gt = gen_new_label();
lt = gen_new_label();
done = gen_new_label();


@ -34,6 +34,7 @@
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"
/* RISC-V CPU definitions */
@ -533,10 +534,12 @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs,
CPURISCVState *env = &cpu->env;
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
if (xl == MXL_RV32) {
env->pc = (int32_t)tb_pc(tb);
env->pc = (int32_t) tb->pc;
} else {
env->pc = tb_pc(tb);
env->pc = tb->pc;
}
}

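Note the deliberate cast in the RV32 branch above: it preserves the old (int32_t)tb_pc(tb) behaviour of keeping a 32-bit pc sign-extended in the 64-bit field, per the RISC-V convention:

/* sketch of the cast's effect */
env->pc = (int32_t)tb->pc;   /* 0x80000000 -> 0xffffffff80000000 */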

@ -1309,7 +1309,7 @@ static const TranslatorOps riscv_tr_ops = {
.disas_log = riscv_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -44,7 +44,8 @@ static void rx_cpu_synchronize_from_tb(CPUState *cs,
{
RXCPU *cpu = RX_CPU(cs);
cpu->env.pc = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
cpu->env.pc = tb->pc;
}
static void rx_restore_state_to_opc(CPUState *cs,


@ -2363,7 +2363,7 @@ static const TranslatorOps rx_tr_ops = {
.disas_log = rx_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -145,7 +145,7 @@ static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
int mmu_idx, bool nonfault,
void **phost, uintptr_t ra)
{
int flags = probe_access_flags(env, addr, access_type, mmu_idx,
int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx,
nonfault, phost, ra);
if (unlikely(flags & TLB_INVALID_MASK)) {


@ -6619,7 +6619,7 @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc;


@ -26,6 +26,7 @@
#include "migration/vmstate.h"
#include "exec/exec-all.h"
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
static void superh_cpu_set_pc(CPUState *cs, vaddr value)
{
@ -46,7 +47,8 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
{
SuperHCPU *cpu = SUPERH_CPU(cs);
cpu->env.pc = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
cpu->env.pc = tb->pc;
cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
}
@ -73,7 +75,7 @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
CPUSH4State *env = &cpu->env;
if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND))
&& env->pc != tb_pc(tb)) {
&& !(cs->tcg_cflags & CF_PCREL) && env->pc != tb->pc) {
env->pc -= 2;
env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND);
return true;


@ -2374,7 +2374,7 @@ static const TranslatorOps sh4_tr_ops = {
.disas_log = sh4_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -25,6 +25,7 @@
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#include "qapi/visitor.h"
#include "tcg/tcg.h"
//#define DEBUG_FEATURES
@ -707,7 +708,8 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs,
{
SPARCCPU *cpu = SPARC_CPU(cs);
cpu->env.pc = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
cpu->env.pc = tb->pc;
cpu->env.npc = tb->cs_base;
}


@ -5904,7 +5904,7 @@ static const TranslatorOps sparc_tr_ops = {
.disas_log = sparc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = {};


@ -55,7 +55,8 @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs,
TriCoreCPU *cpu = TRICORE_CPU(cs);
CPUTriCoreState *env = &cpu->env;
env->PC = tb_pc(tb);
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
env->PC = tb->pc;
}
static void tricore_restore_state_to_opc(CPUState *cs,


@ -8881,7 +8881,7 @@ static const TranslatorOps tricore_tr_ops = {
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext ctx;


@ -307,7 +307,7 @@ static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
if (!dc->sar_m32_allocated) {
dc->sar_m32 = tcg_temp_local_new_i32();
dc->sar_m32 = tcg_temp_new_i32();
dc->sar_m32_allocated = true;
}
tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
@ -1074,10 +1074,10 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
if (i == 0 || arg_copy[i].resource != resource) {
resource = arg_copy[i].resource;
if (arg_copy[i].arg->num_bits <= 32) {
temp = tcg_temp_local_new_i32();
temp = tcg_temp_new_i32();
tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
} else if (arg_copy[i].arg->num_bits <= 64) {
temp = tcg_temp_local_new_i64();
temp = tcg_temp_new_i64();
tcg_gen_mov_i64(temp, arg_copy[i].arg->in);
} else {
g_assert_not_reached();
@ -1187,7 +1187,7 @@ static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
DisasContext *dc = container_of(dcbase, DisasContext, base);
if (dc->icount) {
dc->next_icount = tcg_temp_local_new_i32();
dc->next_icount = tcg_temp_new_i32();
}
}
@ -1279,7 +1279,7 @@ static const TranslatorOps xtensa_translator_ops = {
.disas_log = xtensa_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc)
{
DisasContext dc = {};
@ -2273,8 +2273,8 @@ static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
TCGv_i32 tmp = tcg_temp_local_new_i32();
TCGv_i32 addr = tcg_temp_local_new_i32();
TCGv_i32 tmp = tcg_temp_new_i32();
TCGv_i32 addr = tcg_temp_new_i32();
MemOp mop;
tcg_gen_mov_i32(tmp, arg[0].in);
@ -2303,8 +2303,8 @@ static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
TCGv_i32 prev = tcg_temp_new_i32();
TCGv_i32 addr = tcg_temp_local_new_i32();
TCGv_i32 res = tcg_temp_local_new_i32();
TCGv_i32 addr = tcg_temp_new_i32();
TCGv_i32 res = tcg_temp_new_i32();
TCGLabel *label = gen_new_label();
MemOp mop;

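The one-line rename in tcg/optimize.c tracks the new temporary taxonomy, in which TEMP_LOCAL becomes TEMP_TB. As rough orientation, a sketch of the kinds involved; the exact enum definition is an assumption, not quoted from this diff:

/* illustrative only */
typedef enum TCGTempKind {
    TEMP_EBB,       /* dead at the end of an extended basic block */
    TEMP_TB,        /* formerly TEMP_LOCAL: live to the end of the TB */
    TEMP_GLOBAL,    /* live across translation blocks */
    TEMP_FIXED,     /* permanently in a fixed register */
    TEMP_CONST,     /* a constant value */
} TCGTempKind;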

@ -190,7 +190,7 @@ static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
} else if (i->kind > ts->kind) {
if (i->kind == TEMP_GLOBAL) {
g = i;
} else if (i->kind == TEMP_LOCAL) {
} else if (i->kind == TEMP_TB) {
l = i;
}
}

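The tcg-op-gvec.c conversion that follows moves the expanders' internal temporaries to the tcg_temp_ebb_new_* variants: code emitted by these helpers never keeps a value live across a branch of the helper's own making, so the cheapest, extended-basic-block lifetime suffices. The pattern, as in the shift expanders below:

/* an internal temp that dies within the extended basic block */
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t, b, 31);             /* mod the shift count */
tcg_gen_shl_i32(d, a, t);
tcg_temp_free_i32(t);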

@ -116,8 +116,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
TCGv_ptr a0, a1;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -137,8 +137,8 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
TCGv_ptr a0, a1;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -157,9 +157,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
TCGv_ptr a0, a1, a2;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a2 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -180,10 +180,10 @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
TCGv_ptr a0, a1, a2, a3;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a2 = tcg_temp_new_ptr();
a3 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
a3 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -206,11 +206,11 @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
TCGv_ptr a0, a1, a2, a3, a4;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a2 = tcg_temp_new_ptr();
a3 = tcg_temp_new_ptr();
a4 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
a3 = tcg_temp_ebb_new_ptr();
a4 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -236,8 +236,8 @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
TCGv_ptr a0, a1;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -257,9 +257,9 @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
TCGv_ptr a0, a1, a2;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a2 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -282,10 +282,10 @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
TCGv_ptr a0, a1, a2, a3;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a2 = tcg_temp_new_ptr();
a3 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
a3 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -310,11 +310,11 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
TCGv_ptr a0, a1, a2, a3, a4;
TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
a0 = tcg_temp_new_ptr();
a1 = tcg_temp_new_ptr();
a2 = tcg_temp_new_ptr();
a3 = tcg_temp_new_ptr();
a4 = tcg_temp_new_ptr();
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
a3 = tcg_temp_ebb_new_ptr();
a4 = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(a0, cpu_env, dofs);
tcg_gen_addi_ptr(a1, cpu_env, aofs);
@ -575,16 +575,16 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
be simple enough. */
if (TCG_TARGET_REG_BITS == 64
&& (vece != MO_32 || !check_size_impl(oprsz, 4))) {
t_64 = tcg_temp_new_i64();
t_64 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t_64, in_32);
tcg_gen_dup_i64(vece, t_64, t_64);
} else {
t_32 = tcg_temp_new_i32();
t_32 = tcg_temp_ebb_new_i32();
tcg_gen_dup_i32(vece, t_32, in_32);
}
} else if (in_64) {
/* We are given a 64-bit variable input. */
t_64 = tcg_temp_new_i64();
t_64 = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, t_64, in_64);
} else {
/* We are given a constant input. */
@ -619,7 +619,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
}
/* Otherwise implement out of line. */
t_ptr = tcg_temp_new_ptr();
t_ptr = tcg_temp_ebb_new_ptr();
tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);
/*
@ -629,13 +629,13 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
* stores through to memset.
*/
if (oprsz == maxsz && vece == MO_8) {
TCGv_ptr t_size = tcg_const_ptr(oprsz);
TCGv_ptr t_size = tcg_constant_ptr(oprsz);
TCGv_i32 t_val;
if (in_32) {
t_val = in_32;
} else if (in_64) {
t_val = tcg_temp_new_i32();
t_val = tcg_temp_ebb_new_i32();
tcg_gen_extrl_i64_i32(t_val, in_64);
} else {
t_val = tcg_constant_i32(in_c);
@ -645,7 +645,6 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
if (in_64) {
tcg_temp_free_i32(t_val);
}
tcg_temp_free_ptr(t_size);
tcg_temp_free_ptr(t_ptr);
return;
}
@ -670,7 +669,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
if (in_32) {
fns[vece](t_ptr, t_desc, in_32);
} else if (in_64) {
t_32 = tcg_temp_new_i32();
t_32 = tcg_temp_ebb_new_i32();
tcg_gen_extrl_i64_i32(t_32, in_64);
fns[vece](t_ptr, t_desc, t_32);
tcg_temp_free_i32(t_32);
@ -1734,7 +1733,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
do_dup_store(type, dofs, oprsz, maxsz, t_vec);
tcg_temp_free_vec(t_vec);
} else if (vece <= MO_32) {
TCGv_i32 in = tcg_temp_new_i32();
TCGv_i32 in = tcg_temp_ebb_new_i32();
switch (vece) {
case MO_8:
tcg_gen_ld8u_i32(in, cpu_env, aofs);
@ -1749,7 +1748,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
tcg_temp_free_i32(in);
} else {
TCGv_i64 in = tcg_temp_new_i64();
TCGv_i64 in = tcg_temp_ebb_new_i64();
tcg_gen_ld_i64(in, cpu_env, aofs);
do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
tcg_temp_free_i64(in);
@ -1768,8 +1767,8 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
}
tcg_temp_free_vec(in);
} else {
TCGv_i64 in0 = tcg_temp_new_i64();
TCGv_i64 in1 = tcg_temp_new_i64();
TCGv_i64 in0 = tcg_temp_ebb_new_i64();
TCGv_i64 in1 = tcg_temp_ebb_new_i64();
tcg_gen_ld_i64(in0, cpu_env, aofs);
tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
@ -1814,7 +1813,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
int j;
for (j = 0; j < 4; ++j) {
in[j] = tcg_temp_new_i64();
in[j] = tcg_temp_ebb_new_i64();
tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
}
for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
@ -1859,9 +1858,9 @@ void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
the 64-bit operation. */
static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
TCGv_i64 t3 = tcg_temp_ebb_new_i64();
tcg_gen_andc_i64(t1, a, m);
tcg_gen_andc_i64(t2, b, m);
@ -1884,9 +1883,9 @@ void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t3 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
TCGv_i32 t3 = tcg_temp_ebb_new_i32();
tcg_gen_andc_i32(t1, a, m);
tcg_gen_andc_i32(t2, b, m);
@ -1908,8 +1907,8 @@ void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t1, a, ~0xffff);
tcg_gen_add_i32(t2, a, b);
@ -1922,8 +1921,8 @@ void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t1, a, ~0xffffffffull);
tcg_gen_add_i64(t2, a, b);
@ -2042,9 +2041,9 @@ void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
Compare gen_addv_mask above. */
static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
TCGv_i64 t3 = tcg_temp_ebb_new_i64();
tcg_gen_or_i64(t1, a, m);
tcg_gen_andc_i64(t2, b, m);
@ -2067,9 +2066,9 @@ void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t3 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
TCGv_i32 t3 = tcg_temp_ebb_new_i32();
tcg_gen_or_i32(t1, a, m);
tcg_gen_andc_i32(t2, b, m);
@ -2091,8 +2090,8 @@ void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t1, b, ~0xffff);
tcg_gen_sub_i32(t2, a, b);
@ -2105,8 +2104,8 @@ void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t1, b, ~0xffffffffull);
tcg_gen_sub_i64(t2, a, b);
@ -2467,8 +2466,8 @@ void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
Compare gen_subv_mask above. */
static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
{
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
TCGv_i64 t3 = tcg_temp_ebb_new_i64();
tcg_gen_andc_i64(t3, m, b);
tcg_gen_andc_i64(t2, b, m);
@ -2493,8 +2492,8 @@ void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t1, b, ~0xffffffffull);
tcg_gen_neg_i64(t2, b);
@ -2539,7 +2538,7 @@ void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
int nbit = 8 << vece;
/* Create -1 for each negative element. */
@ -2748,7 +2747,7 @@ static const GVecGen2s gop_ands = {
void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
tcg_temp_free_i64(tmp);
@ -2772,7 +2771,7 @@ static const GVecGen2s gop_xors = {
void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
tcg_temp_free_i64(tmp);
@ -2796,7 +2795,7 @@ static const GVecGen2s gop_ors = {
void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
tcg_temp_free_i64(tmp);
@ -2943,7 +2942,7 @@ void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
uint64_t c_mask = dup_const(MO_8, 0xff >> c);
TCGv_i64 s = tcg_temp_new_i64();
TCGv_i64 s = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(d, a, c);
tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
@ -2957,7 +2956,7 @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
TCGv_i64 s = tcg_temp_new_i64();
TCGv_i64 s = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(d, a, c);
tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
@ -2971,7 +2970,7 @@ void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
{
uint32_t s_mask = dup_const(MO_8, 0x80 >> c);
uint32_t c_mask = dup_const(MO_8, 0xff >> c);
TCGv_i32 s = tcg_temp_new_i32();
TCGv_i32 s = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(d, a, c);
tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
@ -2985,7 +2984,7 @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
{
uint32_t s_mask = dup_const(MO_16, 0x8000 >> c);
uint32_t c_mask = dup_const(MO_16, 0xffff >> c);
TCGv_i32 s = tcg_temp_new_i32();
TCGv_i32 s = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(d, a, c);
tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
@ -3179,7 +3178,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
TCGv_vec v_shift = tcg_temp_new_vec(type);
if (vece == MO_64) {
TCGv_i64 sh64 = tcg_temp_new_i64();
TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(sh64, shift);
tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
tcg_temp_free_i64(sh64);
@ -3220,14 +3219,14 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
if (vece == MO_32 && check_size_impl(oprsz, 4)) {
expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
} else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
TCGv_i64 sh64 = tcg_temp_new_i64();
TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(sh64, shift);
expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
tcg_temp_free_i64(sh64);
} else {
TCGv_ptr a0 = tcg_temp_new_ptr();
TCGv_ptr a1 = tcg_temp_new_ptr();
TCGv_i32 desc = tcg_temp_new_i32();
TCGv_ptr a0 = tcg_temp_ebb_new_ptr();
TCGv_ptr a1 = tcg_temp_ebb_new_ptr();
TCGv_i32 desc = tcg_temp_ebb_new_i32();
tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
@ -3359,7 +3358,7 @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t, b, 31);
tcg_gen_shl_i32(d, a, t);
@ -3368,7 +3367,7 @@ static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t, b, 63);
tcg_gen_shl_i64(d, a, t);
@ -3422,7 +3421,7 @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t, b, 31);
tcg_gen_shr_i32(d, a, t);
@ -3431,7 +3430,7 @@ static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t, b, 63);
tcg_gen_shr_i64(d, a, t);
@ -3485,7 +3484,7 @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t, b, 31);
tcg_gen_sar_i32(d, a, t);
@ -3494,7 +3493,7 @@ static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t, b, 63);
tcg_gen_sar_i64(d, a, t);
@ -3548,7 +3547,7 @@ static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t, b, 31);
tcg_gen_rotl_i32(d, a, t);
@ -3557,7 +3556,7 @@ static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t, b, 63);
tcg_gen_rotl_i64(d, a, t);
@ -3607,7 +3606,7 @@ static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_andi_i32(t, b, 31);
tcg_gen_rotr_i32(d, a, t);
@ -3616,7 +3615,7 @@ static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_andi_i64(t, b, 63);
tcg_gen_rotr_i64(d, a, t);
@ -3657,8 +3656,8 @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, TCGCond cond)
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
@ -3675,8 +3674,8 @@ static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t oprsz, TCGCond cond)
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
@ -3822,7 +3821,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_and_i64(t, b, a);
tcg_gen_andc_i64(d, c, a);


@ -264,7 +264,7 @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_div_i32) {
tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t0, arg1, 31);
tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
tcg_temp_free_i32(t0);
@ -278,13 +278,13 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_rem_i32) {
tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
tcg_gen_mul_i32(t0, t0, arg2);
tcg_gen_sub_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t0, arg1, 31);
tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
tcg_temp_free_i32(t0);
@ -298,7 +298,7 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_div_i32) {
tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_movi_i32(t0, 0);
tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
tcg_temp_free_i32(t0);
@ -312,13 +312,13 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_rem_i32) {
tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
tcg_gen_mul_i32(t0, t0, arg2);
tcg_gen_sub_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_movi_i32(t0, 0);
tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
tcg_temp_free_i32(t0);
@ -332,7 +332,7 @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_andc_i32) {
tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_not_i32(t0, arg2);
tcg_gen_and_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
@ -374,7 +374,7 @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_orc_i32) {
tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_not_i32(t0, arg2);
tcg_gen_or_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
@ -386,8 +386,8 @@ void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_clz_i32) {
tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_clz_i64) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t1, arg1);
tcg_gen_extu_i32_i64(t2, arg2);
tcg_gen_addi_i64(t2, t2, 32);
@ -411,8 +411,8 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_ctz_i32) {
tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_ctz_i64) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t1, arg1);
tcg_gen_extu_i32_i64(t2, arg2);
tcg_gen_ctz_i64(t1, t1, t2);
@ -423,7 +423,7 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
|| TCG_TARGET_HAS_ctpop_i64
|| TCG_TARGET_HAS_clz_i32
|| TCG_TARGET_HAS_clz_i64) {
TCGv_i32 z, t = tcg_temp_new_i32();
TCGv_i32 z, t = tcg_temp_ebb_new_i32();
if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
tcg_gen_subi_i32(t, arg1, 1);
@ -448,7 +448,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_subi_i32(t, arg1, 1);
tcg_gen_andc_i32(t, t, arg1);
tcg_gen_ctpop_i32(ret, t);
@ -461,7 +461,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
{
if (TCG_TARGET_HAS_clz_i32) {
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t, arg, 31);
tcg_gen_xor_i32(t, t, arg);
tcg_gen_clzi_i32(t, t, 32);
@ -477,7 +477,7 @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
if (TCG_TARGET_HAS_ctpop_i32) {
tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
} else if (TCG_TARGET_HAS_ctpop_i64) {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t, arg1);
tcg_gen_ctpop_i64(t, t);
tcg_gen_extrl_i64_i32(ret, t);
@ -494,8 +494,8 @@ void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
} else {
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
t0 = tcg_temp_ebb_new_i32();
t1 = tcg_temp_ebb_new_i32();
tcg_gen_shl_i32(t0, arg1, arg2);
tcg_gen_subfi_i32(t1, 32, arg2);
tcg_gen_shr_i32(t1, arg1, t1);
@ -515,8 +515,8 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2));
} else {
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
t0 = tcg_temp_ebb_new_i32();
t1 = tcg_temp_ebb_new_i32();
tcg_gen_shli_i32(t0, arg1, arg2);
tcg_gen_shri_i32(t1, arg1, 32 - arg2);
tcg_gen_or_i32(ret, t0, t1);
@ -532,8 +532,8 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
} else {
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
t0 = tcg_temp_ebb_new_i32();
t1 = tcg_temp_ebb_new_i32();
tcg_gen_shr_i32(t0, arg1, arg2);
tcg_gen_subfi_i32(t1, 32, arg2);
tcg_gen_shl_i32(t1, arg1, t1);
@ -574,7 +574,7 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
return;
}
t1 = tcg_temp_new_i32();
t1 = tcg_temp_ebb_new_i32();
if (TCG_TARGET_HAS_extract2_i32) {
if (ofs + len == 32) {
@ -801,7 +801,7 @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
} else if (TCG_TARGET_HAS_extract2_i32) {
tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(t0, al, ofs);
tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs);
tcg_temp_free_i32(t0);
@ -818,8 +818,8 @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
} else if (TCG_TARGET_HAS_movcond_i32) {
tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
tcg_gen_setcond_i32(cond, t0, c1, c2);
tcg_gen_neg_i32(t0, t0);
tcg_gen_and_i32(t1, v1, t0);
@ -836,8 +836,8 @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
if (TCG_TARGET_HAS_add2_i32) {
tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_concat_i32_i64(t0, al, ah);
tcg_gen_concat_i32_i64(t1, bl, bh);
tcg_gen_add_i64(t0, t0, t1);
@ -853,8 +853,8 @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
if (TCG_TARGET_HAS_sub2_i32) {
tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_concat_i32_i64(t0, al, ah);
tcg_gen_concat_i32_i64(t1, bl, bh);
tcg_gen_sub_i64(t0, t0, t1);
@ -869,14 +869,14 @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_mulu2_i32) {
tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
} else if (TCG_TARGET_HAS_muluh_i32) {
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
tcg_gen_mov_i32(rl, t);
tcg_temp_free_i32(t);
} else if (TCG_TARGET_REG_BITS == 64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t0, arg1);
tcg_gen_extu_i32_i64(t1, arg2);
tcg_gen_mul_i64(t0, t0, t1);
@ -893,16 +893,16 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
if (TCG_TARGET_HAS_muls2_i32) {
tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
} else if (TCG_TARGET_HAS_mulsh_i32) {
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
tcg_gen_mov_i32(rl, t);
tcg_temp_free_i32(t);
} else if (TCG_TARGET_REG_BITS == 32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t3 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
TCGv_i32 t3 = tcg_temp_ebb_new_i32();
tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
/* Adjust for negative inputs. */
tcg_gen_sari_i32(t2, arg1, 31);
@ -917,8 +917,8 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_ext_i32_i64(t0, arg1);
tcg_gen_ext_i32_i64(t1, arg2);
tcg_gen_mul_i64(t0, t0, t1);
@ -931,9 +931,9 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
if (TCG_TARGET_REG_BITS == 32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
/* Adjust for negative input for the signed arg1. */
tcg_gen_sari_i32(t2, arg1, 31);
@ -944,8 +944,8 @@ void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_ext_i32_i64(t0, arg1);
tcg_gen_extu_i32_i64(t1, arg2);
tcg_gen_mul_i64(t0, t0, t1);
@ -1001,8 +1001,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
if (TCG_TARGET_HAS_bswap16_i32) {
tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(t0, arg, 8);
if (!(flags & TCG_BSWAP_IZ)) {
@ -1030,8 +1030,8 @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
if (TCG_TARGET_HAS_bswap32_i32) {
tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff);
/* arg = abcd */
@ -1078,7 +1078,7 @@ void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
{
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t, a, 31);
tcg_gen_xor_i32(ret, a, t);
@ -1241,8 +1241,8 @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
TCGv_i64 t0;
TCGv_i32 t1;
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i32();
t0 = tcg_temp_ebb_new_i64();
t1 = tcg_temp_ebb_new_i32();
tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
TCGV_LOW(arg1), TCGV_LOW(arg2));
@ -1423,7 +1423,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
tcg_gen_extract2_i32(TCGV_HIGH(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
tcg_gen_deposit_i32(TCGV_HIGH(ret), t0,
TCGV_HIGH(arg1), c, 32 - c);
@ -1557,7 +1557,7 @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_div_i64) {
tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t0, arg1, 63);
tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
tcg_temp_free_i64(t0);
@ -1571,13 +1571,13 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_rem_i64) {
tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
tcg_gen_mul_i64(t0, t0, arg2);
tcg_gen_sub_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t0, arg1, 63);
tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
tcg_temp_free_i64(t0);
@ -1591,7 +1591,7 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_div_i64) {
tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_movi_i64(t0, 0);
tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
tcg_temp_free_i64(t0);
@ -1605,13 +1605,13 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_rem_i64) {
tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
tcg_gen_mul_i64(t0, t0, arg2);
tcg_gen_sub_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_movi_i64(t0, 0);
tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
tcg_temp_free_i64(t0);
@ -1710,8 +1710,8 @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
} else if (TCG_TARGET_HAS_bswap16_i64) {
tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(t0, arg, 8);
if (!(flags & TCG_BSWAP_IZ)) {
@ -1749,8 +1749,8 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
} else if (TCG_TARGET_HAS_bswap32_i64) {
tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff);
/* arg = xxxxabcd */
@ -1778,8 +1778,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
{
if (TCG_TARGET_REG_BITS == 32) {
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
t0 = tcg_temp_ebb_new_i32();
t1 = tcg_temp_ebb_new_i32();
tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
@ -1790,9 +1790,9 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
} else if (TCG_TARGET_HAS_bswap64_i64) {
tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
/* arg = abcdefgh */
tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull);
@ -1822,8 +1822,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg)
{
uint64_t m = 0x0000ffff0000ffffull;
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
/* See include/qemu/bitops.h, hswap64. */
tcg_gen_rotli_i64(t1, arg, 32);
@ -1863,7 +1863,7 @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
} else if (TCG_TARGET_HAS_andc_i64) {
tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_not_i64(t0, arg2);
tcg_gen_and_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
@ -1917,7 +1917,7 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
} else if (TCG_TARGET_HAS_orc_i64) {
tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_not_i64(t0, arg2);
tcg_gen_or_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
@ -1938,7 +1938,7 @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
if (TCG_TARGET_REG_BITS == 32
&& TCG_TARGET_HAS_clz_i32
&& arg2 <= 0xffffffffu) {
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32);
tcg_gen_addi_i32(t, t, 32);
tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
@ -1956,7 +1956,7 @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_ctz_i64) {
tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
TCGv_i64 z, t = tcg_temp_new_i64();
TCGv_i64 z, t = tcg_temp_ebb_new_i64();
if (TCG_TARGET_HAS_ctpop_i64) {
tcg_gen_subi_i64(t, arg1, 1);
@ -1983,7 +1983,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
if (TCG_TARGET_REG_BITS == 32
&& TCG_TARGET_HAS_ctz_i32
&& arg2 <= 0xffffffffu) {
TCGv_i32 t32 = tcg_temp_new_i32();
TCGv_i32 t32 = tcg_temp_ebb_new_i32();
tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32);
tcg_gen_addi_i32(t32, t32, 32);
tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
@ -1993,7 +1993,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
&& TCG_TARGET_HAS_ctpop_i64
&& arg2 == 64) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_subi_i64(t, arg1, 1);
tcg_gen_andc_i64(t, t, arg1);
tcg_gen_ctpop_i64(ret, t);
@ -2008,7 +2008,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
{
if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t, arg, 63);
tcg_gen_xor_i64(t, t, arg);
tcg_gen_clzi_i64(t, t, 64);
@ -2039,8 +2039,8 @@ void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
} else {
TCGv_i64 t0, t1;
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
t0 = tcg_temp_ebb_new_i64();
t1 = tcg_temp_ebb_new_i64();
tcg_gen_shl_i64(t0, arg1, arg2);
tcg_gen_subfi_i64(t1, 64, arg2);
tcg_gen_shr_i64(t1, arg1, t1);
@ -2060,8 +2060,8 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2));
} else {
TCGv_i64 t0, t1;
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
t0 = tcg_temp_ebb_new_i64();
t1 = tcg_temp_ebb_new_i64();
tcg_gen_shli_i64(t0, arg1, arg2);
tcg_gen_shri_i64(t1, arg1, 64 - arg2);
tcg_gen_or_i64(ret, t0, t1);
@ -2076,8 +2076,8 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
} else {
TCGv_i64 t0, t1;
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
t0 = tcg_temp_ebb_new_i64();
t1 = tcg_temp_ebb_new_i64();
tcg_gen_shr_i64(t0, arg1, arg2);
tcg_gen_subfi_i64(t1, 64, arg2);
tcg_gen_shl_i64(t1, arg1, t1);
@ -2133,7 +2133,7 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
}
}
t1 = tcg_temp_new_i64();
t1 = tcg_temp_ebb_new_i64();
if (TCG_TARGET_HAS_extract2_i64) {
if (ofs + len == 64) {
@ -2365,7 +2365,7 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
return;
} else if (len > 32) {
TCGv_i32 t = tcg_temp_new_i32();
TCGv_i32 t = tcg_temp_ebb_new_i32();
/* Extract the bits for the high word normally. */
tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32);
/* Shift the field down for the low part. */
@ -2460,7 +2460,7 @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
} else if (TCG_TARGET_HAS_extract2_i64) {
tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(t0, al, ofs);
tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs);
tcg_temp_free_i64(t0);
@ -2475,8 +2475,8 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_mov_i64(ret, v2);
} else if (TCG_TARGET_REG_BITS == 32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
TCGV_LOW(c1), TCGV_HIGH(c1),
TCGV_LOW(c2), TCGV_HIGH(c2), cond);
@ -2503,8 +2503,8 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
} else if (TCG_TARGET_HAS_movcond_i64) {
tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_setcond_i64(cond, t0, c1, c2);
tcg_gen_neg_i64(t0, t0);
tcg_gen_and_i64(t1, v1, t0);
@ -2521,8 +2521,8 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
if (TCG_TARGET_HAS_add2_i64) {
tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_add_i64(t0, al, bl);
tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
tcg_gen_add_i64(rh, ah, bh);
@ -2539,8 +2539,8 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
if (TCG_TARGET_HAS_sub2_i64) {
tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
tcg_gen_sub_i64(t0, al, bl);
tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
tcg_gen_sub_i64(rh, ah, bh);
@ -2556,13 +2556,13 @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_mulu2_i64) {
tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
} else if (TCG_TARGET_HAS_muluh_i64) {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_mul_i64(t0, arg1, arg2);
gen_helper_muluh_i64(rh, arg1, arg2);
tcg_gen_mov_i64(rl, t0);
@ -2575,16 +2575,16 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_HAS_muls2_i64) {
tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
} else if (TCG_TARGET_HAS_mulsh_i64) {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
} else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
TCGv_i64 t3 = tcg_temp_ebb_new_i64();
tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
/* Adjust for negative inputs. */
tcg_gen_sari_i64(t2, arg1, 63);
@ -2599,7 +2599,7 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_temp_free_i64(t2);
tcg_temp_free_i64(t3);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_mul_i64(t0, arg1, arg2);
gen_helper_mulsh_i64(rh, arg1, arg2);
tcg_gen_mov_i64(rl, t0);
@ -2609,9 +2609,9 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
/* Adjust for negative input for the signed arg1. */
tcg_gen_sari_i64(t2, arg1, 63);
@ -2645,7 +2645,7 @@ void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a)
{
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t, a, 63);
tcg_gen_xor_i64(ret, a, t);
@ -2675,7 +2675,7 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
tcg_gen_op2(INDEX_op_extrh_i64_i32,
tcgv_i32_arg(ret), tcgv_i64_arg(arg));
} else {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(t, arg, 32);
tcg_gen_mov_i32(ret, (TCGv_i32)t);
tcg_temp_free_i64(t);
@ -2714,7 +2714,7 @@ void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
return;
}
tmp = tcg_temp_new_i64();
tmp = tcg_temp_ebb_new_i64();
/* These extensions are only needed for type correctness.
We may be able to do better given target specific information. */
tcg_gen_extu_i32_i64(tmp, high);
@ -2826,7 +2826,7 @@ void tcg_gen_lookup_and_goto_ptr(void)
}
plugin_gen_disable_mem_helpers();
ptr = tcg_temp_new_ptr();
ptr = tcg_temp_ebb_new_ptr();
gen_helper_lookup_tb_ptr(ptr, cpu_env);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
@ -2987,7 +2987,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
oi = make_memop_idx(memop, idx);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i32();
swap = tcg_temp_ebb_new_i32();
switch (memop & MO_SIZE) {
case MO_16:
tcg_gen_bswap16_i32(swap, val, 0);
@ -3082,7 +3082,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
oi = make_memop_idx(memop, idx);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i64();
swap = tcg_temp_ebb_new_i64();
switch (memop & MO_SIZE) {
case MO_16:
tcg_gen_bswap16_i64(swap, val, 0);
@ -3224,7 +3224,7 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
addr_p8 = tcg_temp_new();
if ((mop[0] ^ memop) & MO_BSWAP) {
TCGv_i64 t = tcg_temp_new_i64();
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_bswap64_i64(t, x);
gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx);
@ -3328,8 +3328,8 @@ static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
TCGv_i32 newv, TCGArg idx, MemOp memop)
{
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
@ -3385,8 +3385,8 @@ void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
return;
}
t1 = tcg_temp_new_i64();
t2 = tcg_temp_new_i64();
t1 = tcg_temp_ebb_new_i64();
t2 = tcg_temp_ebb_new_i64();
tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
@ -3442,9 +3442,9 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
}
} else {
TCGv_i32 c32 = tcg_temp_new_i32();
TCGv_i32 n32 = tcg_temp_new_i32();
TCGv_i32 r32 = tcg_temp_new_i32();
TCGv_i32 c32 = tcg_temp_ebb_new_i32();
TCGv_i32 n32 = tcg_temp_ebb_new_i32();
TCGv_i32 r32 = tcg_temp_ebb_new_i32();
tcg_gen_extrl_i64_i32(c32, cmpv);
tcg_gen_extrl_i64_i32(n32, newv);
@ -3476,10 +3476,10 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv,
gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
} else {
TCGv_i128 oldv = tcg_temp_new_i128();
TCGv_i128 tmpv = tcg_temp_new_i128();
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i128 oldv = tcg_temp_ebb_new_i128();
TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 z = tcg_constant_i64(0);
tcg_gen_qemu_ld_i128(oldv, addr, idx, memop);
@ -3541,8 +3541,8 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
TCGArg idx, MemOp memop, bool new_val,
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
TCGv_i32 t2 = tcg_temp_ebb_new_i32();
memop = tcg_canonicalize_memop(memop, 0, 0);
@ -3579,8 +3579,8 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
TCGArg idx, MemOp memop, bool new_val,
void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
memop = tcg_canonicalize_memop(memop, 1, 0);
@ -3616,8 +3616,8 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
tcg_gen_movi_i64(ret, 0);
#endif /* CONFIG_ATOMIC64 */
} else {
TCGv_i32 v32 = tcg_temp_new_i32();
TCGv_i32 r32 = tcg_temp_new_i32();
TCGv_i32 v32 = tcg_temp_ebb_new_i32();
TCGv_i32 r32 = tcg_temp_ebb_new_i32();
tcg_gen_extrl_i64_i32(v32, val);
do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
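
The tcg-op.c hunks above are one mechanical substitution: a temporary that is created and freed inside a single expansion helper can never be live across a branch or label, so it may use the cheaper EBB-scoped allocator introduced by this series. A minimal sketch of the pattern, assuming a hypothetical gen_nand_i32() helper rather than any function changed above:

    static void gen_nand_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
    {
        /* Born and dead within one extended basic block. */
        TCGv_i32 t = tcg_temp_ebb_new_i32();

        tcg_gen_and_i32(t, a, b);
        tcg_gen_not_i32(ret, t);
        tcg_temp_free_i32(t);
    }
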

tcg/tcg.c

@ -1254,66 +1254,69 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
return ts;
}
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
TCGContext *s = tcg_ctx;
TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
TCGTemp *ts;
int idx, k;
int n;
k = type + (temp_local ? TCG_TYPE_COUNT : 0);
idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
if (idx < TCG_MAX_TEMPS) {
/* There is already an available temp with the right type. */
clear_bit(idx, s->free_temps[k].l);
if (kind == TEMP_EBB) {
int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);
ts = &s->temps[idx];
ts->temp_allocated = 1;
tcg_debug_assert(ts->base_type == type);
tcg_debug_assert(ts->kind == kind);
} else {
int i, n;
if (idx < TCG_MAX_TEMPS) {
/* There is already an available temp with the right type. */
clear_bit(idx, s->free_temps[type].l);
switch (type) {
case TCG_TYPE_I32:
case TCG_TYPE_V64:
case TCG_TYPE_V128:
case TCG_TYPE_V256:
n = 1;
break;
case TCG_TYPE_I64:
n = 64 / TCG_TARGET_REG_BITS;
break;
case TCG_TYPE_I128:
n = 128 / TCG_TARGET_REG_BITS;
break;
default:
g_assert_not_reached();
ts = &s->temps[idx];
ts->temp_allocated = 1;
tcg_debug_assert(ts->base_type == type);
tcg_debug_assert(ts->kind == kind);
goto done;
}
} else {
tcg_debug_assert(kind == TEMP_TB);
}
ts = tcg_temp_alloc(s);
ts->base_type = type;
ts->temp_allocated = 1;
ts->kind = kind;
switch (type) {
case TCG_TYPE_I32:
case TCG_TYPE_V64:
case TCG_TYPE_V128:
case TCG_TYPE_V256:
n = 1;
break;
case TCG_TYPE_I64:
n = 64 / TCG_TARGET_REG_BITS;
break;
case TCG_TYPE_I128:
n = 128 / TCG_TARGET_REG_BITS;
break;
default:
g_assert_not_reached();
}
if (n == 1) {
ts->type = type;
} else {
ts->type = TCG_TYPE_REG;
ts = tcg_temp_alloc(s);
ts->base_type = type;
ts->temp_allocated = 1;
ts->kind = kind;
for (i = 1; i < n; ++i) {
TCGTemp *ts2 = tcg_temp_alloc(s);
if (n == 1) {
ts->type = type;
} else {
ts->type = TCG_TYPE_REG;
tcg_debug_assert(ts2 == ts + i);
ts2->base_type = type;
ts2->type = TCG_TYPE_REG;
ts2->temp_allocated = 1;
ts2->temp_subindex = i;
ts2->kind = kind;
}
for (int i = 1; i < n; ++i) {
TCGTemp *ts2 = tcg_temp_alloc(s);
tcg_debug_assert(ts2 == ts + i);
ts2->base_type = type;
ts2->type = TCG_TYPE_REG;
ts2->temp_allocated = 1;
ts2->temp_subindex = i;
ts2->kind = kind;
}
}
done:
#if defined(CONFIG_DEBUG_TCG)
s->temps_in_use++;
#endif
@ -1340,7 +1343,7 @@ TCGv_vec tcg_temp_new_vec(TCGType type)
}
#endif
t = tcg_temp_new_internal(type, 0);
t = tcg_temp_new_internal(type, TEMP_EBB);
return temp_tcgv_vec(t);
}
@ -1351,14 +1354,13 @@ TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
tcg_debug_assert(t->temp_allocated != 0);
t = tcg_temp_new_internal(t->base_type, 0);
t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
TCGContext *s = tcg_ctx;
int k, idx;
switch (ts->kind) {
case TEMP_CONST:
@ -1367,26 +1369,25 @@ void tcg_temp_free_internal(TCGTemp *ts)
* silently ignore free.
*/
return;
case TEMP_NORMAL:
case TEMP_LOCAL:
case TEMP_EBB:
case TEMP_TB:
break;
default:
g_assert_not_reached();
}
#if defined(CONFIG_DEBUG_TCG)
s->temps_in_use--;
if (s->temps_in_use < 0) {
fprintf(stderr, "More temporaries freed than allocated!\n");
}
#endif
tcg_debug_assert(ts->temp_allocated != 0);
ts->temp_allocated = 0;
idx = temp_idx(ts);
k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
set_bit(idx, s->free_temps[k].l);
#if defined(CONFIG_DEBUG_TCG)
assert(s->temps_in_use > 0);
s->temps_in_use--;
#endif
if (ts->kind == TEMP_EBB) {
int idx = temp_idx(ts);
set_bit(idx, s->free_temps[ts->base_type].l);
}
}
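
/*
 * Illustrative sketch, not part of the patch: since freeing a
 * TEMP_CONST is silently ignored, a constant that must stay valid
 * across a branch is simply a tcg_constant_*() value, which is what
 * makes the tcg_const_local_*() helpers (removed below) unnecessary.
 * gen_add_bias() is a hypothetical caller.
 */
static void gen_add_bias(TCGv_i32 dst, TCGv_i32 src)
{
    /* Formerly: TCGv_i32 bias = tcg_const_local_i32(42); plus a free. */
    TCGv_i32 bias = tcg_constant_i32(42);   /* read-only, never freed */

    tcg_gen_add_i32(dst, src, bias);
}
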
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
@ -1474,22 +1475,6 @@ TCGv_i64 tcg_const_i64(int64_t val)
return t0;
}
TCGv_i32 tcg_const_local_i32(int32_t val)
{
TCGv_i32 t0;
t0 = tcg_temp_local_new_i32();
tcg_gen_movi_i32(t0, val);
return t0;
}
TCGv_i64 tcg_const_local_i64(int64_t val)
{
TCGv_i64 t0;
t0 = tcg_temp_local_new_i64();
tcg_gen_movi_i64(t0, val);
return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
@ -1866,7 +1851,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
case TCG_CALL_ARG_EXTEND_U:
case TCG_CALL_ARG_EXTEND_S:
{
TCGv_i64 temp = tcg_temp_new_i64();
TCGv_i64 temp = tcg_temp_ebb_new_i64();
TCGv_i32 orig = temp_tcgv_i32(ts);
if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
@ -1912,11 +1897,10 @@ static void tcg_reg_alloc_start(TCGContext *s)
break;
case TEMP_GLOBAL:
break;
case TEMP_NORMAL:
case TEMP_EBB:
val = TEMP_VAL_DEAD;
/* fall through */
case TEMP_LOCAL:
case TEMP_TB:
ts->mem_allocated = 0;
break;
default:
@ -1938,13 +1922,10 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
case TEMP_GLOBAL:
pstrcpy(buf, buf_size, ts->name);
break;
case TEMP_LOCAL:
case TEMP_TB:
snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
break;
case TEMP_EBB:
snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
break;
case TEMP_NORMAL:
snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
break;
case TEMP_CONST:
@ -2637,9 +2618,10 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
}
/* Reachable analysis: remove unreachable code. */
static void reachable_code_pass(TCGContext *s)
static void __attribute__((noinline))
reachable_code_pass(TCGContext *s)
{
TCGOp *op, *op_next;
TCGOp *op, *op_next, *op_prev;
bool dead = false;
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
@ -2649,6 +2631,22 @@ static void reachable_code_pass(TCGContext *s)
switch (op->opc) {
case INDEX_op_set_label:
label = arg_label(op->args[0]);
/*
* Optimization can fold conditional branches to unconditional.
* If we find a label which is preceded by an unconditional
* branch to that label, remove the branch. We couldn't do this when
* processing the branch because any dead code between the branch
* and label had not yet been removed.
*/
op_prev = QTAILQ_PREV(op, link);
if (op_prev->opc == INDEX_op_br &&
label == arg_label(op_prev->args[0])) {
tcg_op_remove(s, op_prev);
/* Fall through means insns become live again. */
dead = false;
}
if (label->refs == 0) {
/*
* While there is an occasional backward branch, virtually
@ -2662,21 +2660,6 @@ static void reachable_code_pass(TCGContext *s)
/* Once we see a label, insns become live again. */
dead = false;
remove = false;
/*
* Optimization can fold conditional branches to unconditional.
* If we find a label with one reference which is preceded by
* an unconditional branch to it, remove both. This needed to
* wait until the dead code in between them was removed.
*/
if (label->refs == 1) {
TCGOp *op_prev = QTAILQ_PREV(op, link);
if (op_prev->opc == INDEX_op_br &&
label == arg_label(op_prev->args[0])) {
tcg_op_remove(s, op_prev);
remove = true;
}
}
}
break;
@ -2759,10 +2742,9 @@ static void la_bb_end(TCGContext *s, int ng, int nt)
switch (ts->kind) {
case TEMP_FIXED:
case TEMP_GLOBAL:
case TEMP_LOCAL:
case TEMP_TB:
state = TS_DEAD | TS_MEM;
break;
case TEMP_NORMAL:
case TEMP_EBB:
case TEMP_CONST:
state = TS_DEAD;
@ -2804,16 +2786,13 @@ static void la_bb_sync(TCGContext *s, int ng, int nt)
int state;
switch (ts->kind) {
case TEMP_LOCAL:
case TEMP_TB:
state = ts->state;
ts->state = state | TS_MEM;
if (state != TS_DEAD) {
continue;
}
break;
case TEMP_NORMAL:
s->temps[i].state = TS_DEAD;
break;
case TEMP_EBB:
case TEMP_CONST:
continue;
@ -2857,10 +2836,80 @@ static void la_cross_call(TCGContext *s, int nt)
}
}
/*
* Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
* to TEMP_EBB, if possible.
*/
static void __attribute__((noinline))
liveness_pass_0(TCGContext *s)
{
void * const multiple_ebb = (void *)(uintptr_t)-1;
int nb_temps = s->nb_temps;
TCGOp *op, *ebb;
for (int i = s->nb_globals; i < nb_temps; ++i) {
s->temps[i].state_ptr = NULL;
}
/*
* Represent each EBB by the op at which it begins. In the case of
* the first EBB, this is the first op, otherwise it is a label.
* Collect the uses of each TEMP_TB: NULL for unused, EBB for use
* within a single EBB, else MULTIPLE_EBB.
*/
ebb = QTAILQ_FIRST(&s->ops);
QTAILQ_FOREACH(op, &s->ops, link) {
const TCGOpDef *def;
int nb_oargs, nb_iargs;
switch (op->opc) {
case INDEX_op_set_label:
ebb = op;
continue;
case INDEX_op_discard:
continue;
case INDEX_op_call:
nb_oargs = TCGOP_CALLO(op);
nb_iargs = TCGOP_CALLI(op);
break;
default:
def = &tcg_op_defs[op->opc];
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
break;
}
for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
TCGTemp *ts = arg_temp(op->args[i]);
if (ts->kind != TEMP_TB) {
continue;
}
if (ts->state_ptr == NULL) {
ts->state_ptr = ebb;
} else if (ts->state_ptr != ebb) {
ts->state_ptr = multiple_ebb;
}
}
}
/*
* For TEMP_TB that turned out not to be used beyond one EBB,
* reduce the liveness to TEMP_EBB.
*/
for (int i = s->nb_globals; i < nb_temps; ++i) {
TCGTemp *ts = &s->temps[i];
if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
ts->kind = TEMP_EBB;
}
}
}
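
/*
 * Illustrative translator fragment, not part of the patch, showing
 * what liveness_pass_0 decides.  Both temporaries get the new default
 * TEMP_TB kind; gen_example() is hypothetical.  Every use of t0 sits
 * before the label, so it is demoted to TEMP_EBB and needs no stack
 * slot; t1 is referenced on both sides of the label and stays TEMP_TB.
 */
static void gen_example(TCGv_i32 dst, TCGv_i32 src)
{
    TCGLabel *over = gen_new_label();
    TCGv_i32 t0 = tcg_temp_new_i32();   /* confined to one EBB: demoted */
    TCGv_i32 t1 = tcg_temp_new_i32();   /* live across the label: kept  */

    tcg_gen_andi_i32(t0, src, 0xff);
    tcg_gen_shli_i32(t1, t0, 1);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, over);
    tcg_gen_addi_i32(t1, t1, 1);
    gen_set_label(over);                /* a set_label starts a new EBB */
    tcg_gen_mov_i32(dst, t1);
}
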
/* Liveness analysis: update the opc_arg_life array to tell if a
given input argument is dead. Instructions updating dead
temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
static void __attribute__((noinline))
liveness_pass_1(TCGContext *s)
{
int nb_globals = s->nb_globals;
int nb_temps = s->nb_temps;
@ -3200,7 +3249,8 @@ static void liveness_pass_1(TCGContext *s)
}
/* Liveness analysis: Convert indirect regs to direct temporaries. */
static bool liveness_pass_2(TCGContext *s)
static bool __attribute__((noinline))
liveness_pass_2(TCGContext *s)
{
int nb_globals = s->nb_globals;
int nb_temps, i;
@ -3497,10 +3547,9 @@ static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
case TEMP_FIXED:
return;
case TEMP_GLOBAL:
case TEMP_LOCAL:
case TEMP_TB:
new_type = TEMP_VAL_MEM;
break;
case TEMP_NORMAL:
case TEMP_EBB:
new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
break;
@ -3785,10 +3834,9 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
TCGTemp *ts = &s->temps[i];
switch (ts->kind) {
case TEMP_LOCAL:
case TEMP_TB:
temp_save(s, ts, allocated_regs);
break;
case TEMP_NORMAL:
case TEMP_EBB:
/* The liveness analysis already ensures that temps are dead.
Keep a tcg_debug_assert for safety. */
@ -3822,12 +3870,9 @@ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
* Keep tcg_debug_asserts for safety.
*/
switch (ts->kind) {
case TEMP_LOCAL:
case TEMP_TB:
tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
break;
case TEMP_NORMAL:
tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
break;
case TEMP_EBB:
case TEMP_CONST:
break;
@ -4870,6 +4915,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
#endif
reachable_code_pass(s);
liveness_pass_0(s);
liveness_pass_1(s);
if (s->nb_indirects > 0) {