Merge tag 'pull-tcg-20231004' of https://gitlab.com/rth7680/qemu into staging

accel: Introduce AccelClass::cpu_common_[un]realize
accel: Target agnostic code movement
accel/tcg: Cleanups to use CPUState instead of CPUArchState
accel/tcg: Move CPUNegativeOffsetState into CPUState
tcg: Split out tcg init functions to tcg/startup.h
linux-user/hppa: Fix struct target_sigcontext layout
build: Remove --enable-gprof

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmUdsL4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/iYggAvDJEyMCAXSSH97BA
# wZT/2D/MFIhOMk6xrQRnrXfrG70N0iVKz44jl9j7k1D+9BOHcso//DDJH3c96k9A
# MgDb6W2bsWvC15/Qw6BALf5bb/II0MJuCcQvj3CNX5lNkXAWhwIOBhsZx7V9ST1+
# rihN4nowpRWdV5GeCjDGaJW455Y1gc96hICYHy6Eqw1cUgUFt9vm5aYU3FHlat29
# sYRaVYKUL2hRUPPNcPiPq0AaJ8wN6/s8gT+V1UvTzkhHqskoM4ZU89RchuXVoq1h
# SvhKElyULMRzM7thWtpW8qYJPj4mxZsKArESvHjsunGD6KEz3Fh1sy6EKRcdmpG/
# II1vkg==
# =k2Io
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 04 Oct 2023 14:36:46 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20231004' of https://gitlab.com/rth7680/qemu: (47 commits)
  tcg/loongarch64: Fix buid error
  tests/avocado: Re-enable MIPS Malta tests (GitLab issue #1884 fixed)
  build: Remove --enable-gprof
  linux-user/hppa: Fix struct target_sigcontext layout
  tcg: Split out tcg init functions to tcg/startup.h
  tcg: Remove argument to tcg_prologue_init
  accel/tcg: Make cpu-exec-common.c a target agnostic unit
  accel/tcg: Make icount.o a target agnostic unit
  accel/tcg: Make monitor.c a target-agnostic unit
  accel/tcg: Rename target-specific 'internal.h' -> 'internal-target.h'
  exec: Rename target specific page-vary.c -> page-vary-target.c
  exec: Rename cpu.c -> cpu-target.c
  accel: Rename accel-common.c -> accel-target.c
  accel: Make accel-blocker.o target agnostic
  accel/tcg: Restrict dump_exec_info() declaration
  exec: Move cpu_loop_foo() target agnostic functions to 'cpu-common.h'
  exec: Make EXCP_FOO definitions target agnostic
  accel/tcg: move ld/st helpers to ldst_common.c.inc
  accel/tcg: Unify user and softmmu do_[st|ld]*_mmu()
  accel/tcg: Remove env_tlb()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2023-10-05 08:55:34 -04:00
commit 800af0aae1
225 changed files with 5101 additions and 5323 deletions

View File

@@ -139,8 +139,9 @@ R: Paolo Bonzini <pbonzini@redhat.com>
 S: Maintained
 F: softmmu/cpus.c
 F: softmmu/watchpoint.c
-F: cpus-common.c
-F: page-vary.c
+F: cpu-common.c
+F: cpu-target.c
+F: page-vary-target.c
 F: page-vary-common.c
 F: accel/tcg/
 F: accel/stubs/tcg-stub.c
@@ -1766,7 +1767,6 @@ M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
 R: Philippe Mathieu-Daudé <philmd@linaro.org>
 R: Yanan Wang <wangyanan55@huawei.com>
 S: Supported
-F: cpu.c
 F: hw/core/cpu.c
 F: hw/core/machine-qmp-cmds.c
 F: hw/core/machine.c
@@ -2906,7 +2906,6 @@ F: softmmu/main.c
 F: softmmu/cpus.c
 F: softmmu/cpu-throttle.c
 F: softmmu/cpu-timers.c
-F: softmmu/icount.c
 F: softmmu/runstate*
 F: qapi/run-state.json

View File

@@ -119,16 +119,37 @@ void accel_cpu_instance_init(CPUState *cpu)
     }
 }
 
-bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
+    AccelState *accel = current_accel();
+    AccelClass *acc = ACCEL_GET_CLASS(accel);
 
-    if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
-        return cc->accel_cpu->cpu_realizefn(cpu, errp);
+    /* target specific realization */
+    if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
+        && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
+        return false;
     }
+
+    /* generic realization */
+    if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
+        return false;
+    }
+
     return true;
 }
 
+void accel_cpu_common_unrealize(CPUState *cpu)
+{
+    AccelState *accel = current_accel();
+    AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+    /* generic unrealization */
+    if (acc->cpu_common_unrealize) {
+        acc->cpu_common_unrealize(cpu);
+    }
+}
+
 int accel_supported_gdbstub_sstep_flags(void)
 {
     AccelState *accel = current_accel();
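For orientation, here is a minimal, self-contained model of the two-step realize chain this hunk introduces: the target-specific hook runs first, then the generic accelerator hook, and a false return from either aborts realization. The stand-in CPUState/Error types and the demo hooks are assumptions for illustration, not QEMU's real definitions:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct CPUState { int cpu_index; } CPUState;   /* stand-in */
    typedef struct Error Error;                            /* opaque stand-in */

    /* Per-CPU-class, target-specific hook (formerly cpu_realizefn). */
    static bool demo_target_realize(CPUState *cpu, Error **errp)
    {
        printf("target realize, cpu %d\n", cpu->cpu_index);
        return true;
    }

    /* Per-AccelClass, target-agnostic hook (new in this series). */
    static bool demo_common_realize(CPUState *cpu, Error **errp)
    {
        printf("common realize, cpu %d\n", cpu->cpu_index);
        return true;
    }

    static bool (*cpu_target_realize)(CPUState *, Error **) = demo_target_realize;
    static bool (*cpu_common_realize)(CPUState *, Error **) = demo_common_realize;

    /* Mirrors accel_cpu_common_realize(): target-specific first,
     * then generic; either failing stops the chain. */
    static bool model_realize(CPUState *cpu, Error **errp)
    {
        if (cpu_target_realize && !cpu_target_realize(cpu, errp)) {
            return false;
        }
        if (cpu_common_realize && !cpu_common_realize(cpu, errp)) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        CPUState cpu = { .cpu_index = 0 };
        return model_realize(&cpu, NULL) ? 0 : 1;
    }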

View File

@@ -27,7 +27,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
 
 #ifndef _WIN32

View File

@@ -428,7 +428,7 @@ static void *hvf_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
 
     hvf_init_vcpu(cpu);

View File

@@ -36,7 +36,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
 
     r = kvm_init_vcpu(cpu, &error_fatal);

View File

@@ -1,5 +1,5 @@
-specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
-system_ss.add(files('accel-softmmu.c'))
+specific_ss.add(files('accel-target.c'))
+system_ss.add(files('accel-softmmu.c', 'accel-blocker.c'))
 user_ss.add(files('accel-user.c'))
 
 subdir('tcg')

View File

@@ -73,7 +73,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;
 
 #if DATA_SIZE == 16
@@ -90,7 +91,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;
 
     ret = qatomic_xchg__nocheck(haddr, val);
@@ -104,7 +106,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 { \
     DATA_TYPE *haddr, ret; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     ret = qatomic_##X(haddr, val); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, oi); \
@@ -135,7 +137,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 { \
     XDATA_TYPE *haddr, cmp, old, new, val = xval; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     smp_mb(); \
     cmp = qatomic_read__nocheck(haddr); \
     do { \
@@ -176,7 +178,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;
 
 #if DATA_SIZE == 16
@@ -193,7 +196,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     ABI_TYPE ret;
 
     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -207,7 +211,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 { \
     DATA_TYPE *haddr, ret; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     ret = qatomic_##X(haddr, BSWAP(val)); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, oi); \
@@ -235,7 +239,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 { \
     XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     smp_mb(); \
     ldn = qatomic_read__nocheck(haddr); \
     do { \
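The mechanical change across this template is that atomic_mmu_lookup() now takes a CPUState, so every caller converts its CPUArchState with env_cpu(). That conversion is pure constant-offset pointer arithmetic. A sketch with simplified stand-in types (an assumption: the real ArchCPU embeds CPUState as parent_obj, followed by env):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct CPUState { int cpu_index; } CPUState;         /* stand-in */
    typedef struct CPUArchState { long regs[8]; } CPUArchState;  /* stand-in */

    typedef struct ArchCPU {
        CPUState parent_obj;   /* generic state first */
        CPUArchState env;      /* target-specific state after it */
    } ArchCPU;

    /* env_cpu(): walk back from the embedded env to the enclosing
     * ArchCPU, then up-cast to the leading CPUState. */
    static inline CPUState *env_cpu(CPUArchState *env)
    {
        ArchCPU *cpu = (ArchCPU *)((char *)env - offsetof(ArchCPU, env));
        return &cpu->parent_obj;
    }

    int main(void)
    {
        ArchCPU cpu = { .parent_obj = { .cpu_index = 3 } };
        printf("index via env: %d\n", env_cpu(&cpu.env)->cpu_index);
        return 0;
    }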

View File

@@ -20,9 +20,8 @@
 #include "qemu/osdep.h"
 #include "sysemu/cpus.h"
 #include "sysemu/tcg.h"
-#include "exec/exec-all.h"
 #include "qemu/plugin.h"
-#include "internal.h"
+#include "internal-common.h"
 
 bool tcg_allowed;
@@ -36,7 +35,7 @@ void cpu_loop_exit_noexc(CPUState *cpu)
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec.  */
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     /* Undo any setting in generated code.  */
     qemu_plugin_disable_mem_helpers(cpu);
     siglongjmp(cpu->jmp_env, 1);

View File

@@ -42,7 +42,8 @@
 #include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
 
 /* -icount align implementation. */
@@ -73,7 +74,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
         return;
     }
 
-    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
     sc->last_cpu_icount = cpu_icount;
@@ -124,7 +125,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
     sc->last_cpu_icount
-        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     if (sc->diff_clk < max_delay) {
         max_delay = sc->diff_clk;
     }
@@ -222,7 +223,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
     struct tb_desc desc;
     uint32_t h;
 
-    desc.env = cpu->env_ptr;
+    desc.env = cpu_env(cpu);
     desc.cs_base = cs_base;
     desc.flags = flags;
     desc.cflags = cflags;
@@ -444,7 +445,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 static inline TranslationBlock * QEMU_DISABLE_CFI
 cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     uintptr_t ret;
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
@@ -455,7 +456,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     qemu_thread_jit_execute();
     ret = tcg_qemu_tb_exec(env, tb_ptr);
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     qemu_plugin_disable_mem_helpers(cpu);
     /*
      * TODO: Delay swapping back to the read-write region of the TB
@@ -565,7 +566,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
 void cpu_exec_step_atomic(CPUState *cpu)
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb;
     vaddr pc;
     uint64_t cs_base;
@@ -717,7 +718,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
         if (replay_has_exception()
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
             /* Execute just one insn to trigger exception pending in the log */
             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                   | CF_LAST_IO | CF_NOIRQ | 1;
@@ -807,7 +808,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
@@ -898,7 +899,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(qatomic_read(&cpu->exit_request))
         || (icount_enabled()
             && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
@@ -923,7 +924,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }
 
     *last_tb = NULL;
-    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
+    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
     if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
@@ -942,7 +943,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
         icount_update(cpu);
         /* Refill decrementer and continue execution.  */
         insns_left = MIN(0xffff, cpu->icount_budget);
-        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+        cpu->neg.icount_decr.u16.low = insns_left;
         cpu->icount_extra = cpu->icount_budget - insns_left;
 
         /*
@@ -976,7 +977,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
         uint64_t cs_base;
         uint32_t flags, cflags;
 
-        cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+        cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
 
         /*
          * When requested, use an exact setting for cflags for the next
@@ -1088,7 +1089,7 @@ int cpu_exec(CPUState *cpu)
     return ret;
 }
 
-void tcg_exec_realizefn(CPUState *cpu, Error **errp)
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
 {
     static bool tcg_target_initialized;
     CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -1104,6 +1105,8 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
     tcg_iommu_init_notifier_list(cpu);
 #endif /* !CONFIG_USER_ONLY */
     /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
+
+    return true;
 }
 
 /* undo the initializations in reverse order */
File diff suppressed because it is too large

View File

@@ -27,7 +27,6 @@
 #include "migration/vmstate.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
-#include "exec/exec-all.h"
 #include "sysemu/cpus.h"
 #include "sysemu/qtest.h"
 #include "qemu/main-loop.h"
@@ -38,7 +37,7 @@
 #include "hw/core/cpu.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/cpu-throttle.h"
-#include "timers-state.h"
+#include "softmmu/timers-state.h"
 
 /*
  * ICOUNT: Instruction Counter
@@ -75,7 +74,7 @@ static void icount_enable_adaptive(void)
 static int64_t icount_get_executed(CPUState *cpu)
 {
     return (cpu->icount_budget -
-            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
+            (cpu->neg.icount_decr.u16.low + cpu->icount_extra));
 }
 
 /*
@@ -111,7 +110,7 @@ static int64_t icount_get_raw_locked(void)
     CPUState *cpu = current_cpu;
 
     if (cpu && cpu->running) {
-        if (!cpu->can_do_io) {
+        if (!cpu->neg.can_do_io) {
             error_report("Bad icount read");
             exit(1);
         }

View File

@@ -0,0 +1,28 @@
+/*
+ * Internal execution defines for qemu (target agnostic)
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_INTERNAL_COMMON_H
+#define ACCEL_TCG_INTERNAL_COMMON_H
+
+#include "exec/translation-block.h"
+
+extern int64_t max_delay;
+extern int64_t max_advance;
+
+void dump_exec_info(GString *buf);
+
+/*
+ * Return true if CS is not running in parallel with other cpus, either
+ * because there are no other cpus or we are within an exclusive context.
+ */
+static inline bool cpu_in_serial_context(CPUState *cs)
+{
+    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
+}
+
+#endif

View File

@@ -1,13 +1,13 @@
 /*
- * Internal execution defines for qemu
+ * Internal execution defines for qemu (target specific)
  *
  *  Copyright (c) 2003 Fabrice Bellard
  *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */
 
-#ifndef ACCEL_TCG_INTERNAL_H
-#define ACCEL_TCG_INTERNAL_H
+#ifndef ACCEL_TCG_INTERNAL_TARGET_H
+#define ACCEL_TCG_INTERNAL_TARGET_H
 
 #include "exec/exec-all.h"
 #include "exec/translate-all.h"
@@ -80,6 +80,9 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
 void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc);
 
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
+void tcg_exec_unrealizefn(CPUState *cpu);
+
 /* Return the current PC from CPU, which may be cached in TB. */
 static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
 {
@@ -90,18 +93,6 @@ static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
     }
 }
 
-/*
- * Return true if CS is not running in parallel with other cpus, either
- * because there are no other cpus or we are within an exclusive context.
- */
-static inline bool cpu_in_serial_context(CPUState *cs)
-{
-    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
-}
-
-extern int64_t max_delay;
-extern int64_t max_advance;
-
 extern bool one_insn_per_tb;
 
 /**

View File

@@ -26,7 +26,7 @@
  * If the operation must be split into two operations to be
  * examined separately for atomicity, return -lg2.
  */
-static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
+static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
 {
     MemOp atom = memop & MO_ATOM_MASK;
     MemOp size = memop & MO_SIZE;
@@ -93,7 +93,7 @@ static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
      * host atomicity in order to avoid racing.  This reduction
      * avoids looping with cpu_loop_exit_atomic.
      */
-    if (cpu_in_serial_context(env_cpu(env))) {
+    if (cpu_in_serial_context(cpu)) {
        return MO_8;
     }
     return atmax;
@@ -139,14 +139,14 @@ static inline uint64_t load_atomic8(void *pv)
 /**
  * load_atomic8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @pv: host address
  *
  * Atomically load 8 aligned bytes from @pv.
  * If this is not possible, longjmp out to restart serially.
  */
-static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
 {
     if (HAVE_al8) {
         return load_atomic8(pv);
@@ -168,19 +168,19 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
 #endif
 
     /* Ultimate fallback: re-execute in serial context. */
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }
 
 /**
  * load_atomic16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @pv: host address
 *
 * Atomically load 16 aligned bytes from @pv.
 * If this is not possible, longjmp out to restart serially.
 */
-static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
 {
     Int128 *p = __builtin_assume_aligned(pv, 16);
@@ -212,7 +212,7 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
     }
 
     /* Ultimate fallback: re-execute in serial context. */
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }
 
 /**
@@ -263,7 +263,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
 /**
  * load_atom_extract_al8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @pv: host address
 * @s: object size in bytes, @s <= 4.
@@ -273,7 +273,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
 * 8-byte load and extract.
 * The value is returned in the low bits of a uint32_t.
 */
-static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
                                               void *pv, int s)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -281,12 +281,12 @@ static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
     int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;
 
     pv = (void *)(pi & ~7);
-    return load_atomic8_or_exit(env, ra, pv) >> shr;
+    return load_atomic8_or_exit(cpu, ra, pv) >> shr;
 }
 
 /**
  * load_atom_extract_al16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @p: host address
 * @s: object size in bytes, @s <= 8.
@@ -299,7 +299,7 @@ static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
 *
 * If this is not possible, longjmp out to restart serially.
 */
-static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
                                                void *pv, int s)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -312,7 +312,7 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
      * Provoke SIGBUS if possible otherwise.
      */
     pv = (void *)(pi & ~7);
-    r = load_atomic16_or_exit(env, ra, pv);
+    r = load_atomic16_or_exit(cpu, ra, pv);
 
     r = int128_urshift(r, shr);
     return int128_getlo(r);
@@ -394,7 +394,7 @@ static inline uint64_t load_atom_8_by_8_or_4(void *pv)
 *
 * Load 2 bytes from @p, honoring the atomicity of @memop.
 */
-static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
+static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
                             void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -410,7 +410,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
         }
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         return lduw_he_p(pv);
@@ -421,9 +421,9 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
             return load_atomic4(pv - 1) >> 8;
         }
         if ((pi & 15) != 7) {
-            return load_atom_extract_al8_or_exit(env, ra, pv, 2);
+            return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
         }
-        return load_atom_extract_al16_or_exit(env, ra, pv, 2);
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
     default:
         g_assert_not_reached();
     }
@@ -436,7 +436,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
 *
 * Load 4 bytes from @p, honoring the atomicity of @memop.
 */
-static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
                             void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -452,7 +452,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
         }
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
     case MO_16:
@@ -466,9 +466,9 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
         return load_atom_extract_al4x2(pv);
     case MO_32:
         if (!(pi & 4)) {
-            return load_atom_extract_al8_or_exit(env, ra, pv, 4);
+            return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
         }
-        return load_atom_extract_al16_or_exit(env, ra, pv, 4);
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
     default:
         g_assert_not_reached();
     }
@@ -481,7 +481,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
 *
 * Load 8 bytes from @p, honoring the atomicity of @memop.
 */
-static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
                             void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -498,12 +498,12 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
         return load_atom_extract_al16_or_al8(pv, 8);
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     if (atmax == MO_64) {
         if (!HAVE_al8 && (pi & 7) == 0) {
-            load_atomic8_or_exit(env, ra, pv);
+            load_atomic8_or_exit(cpu, ra, pv);
         }
-        return load_atom_extract_al16_or_exit(env, ra, pv, 8);
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
     }
     if (HAVE_al8_fast) {
         return load_atom_extract_al8x2(pv);
@@ -519,7 +519,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
         if (HAVE_al8) {
             return load_atom_extract_al8x2(pv);
         }
-        cpu_loop_exit_atomic(env_cpu(env), ra);
+        cpu_loop_exit_atomic(cpu, ra);
     default:
         g_assert_not_reached();
     }
@@ -532,7 +532,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
 *
 * Load 16 bytes from @p, honoring the atomicity of @memop.
 */
-static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
+static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
                            void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -548,7 +548,7 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
         return atomic16_read_ro(pv);
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         memcpy(&r, pv, 16);
@@ -563,20 +563,20 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
         break;
     case MO_64:
         if (!HAVE_al8) {
-            cpu_loop_exit_atomic(env_cpu(env), ra);
+            cpu_loop_exit_atomic(cpu, ra);
         }
         a = load_atomic8(pv);
         b = load_atomic8(pv + 8);
         break;
     case -MO_64:
         if (!HAVE_al8) {
-            cpu_loop_exit_atomic(env_cpu(env), ra);
+            cpu_loop_exit_atomic(cpu, ra);
         }
         a = load_atom_extract_al8x2(pv);
         b = load_atom_extract_al8x2(pv + 8);
         break;
     case MO_128:
-        return load_atomic16_or_exit(env, ra, pv);
+        return load_atomic16_or_exit(cpu, ra, pv);
     default:
         g_assert_not_reached();
     }
@@ -857,7 +857,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
 *
 * Store 2 bytes to @p, honoring the atomicity of @memop.
 */
-static void store_atom_2(CPUArchState *env, uintptr_t ra,
+static void store_atom_2(CPUState *cpu, uintptr_t ra,
                          void *pv, MemOp memop, uint16_t val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -868,7 +868,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
         return;
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     if (atmax == MO_8) {
         stw_he_p(pv, val);
         return;
@@ -897,7 +897,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
         g_assert_not_reached();
     }
 
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }
 
 /**
@@ -908,7 +908,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
 *
 * Store 4 bytes to @p, honoring the atomicity of @memop.
 */
-static void store_atom_4(CPUArchState *env, uintptr_t ra,
+static void store_atom_4(CPUState *cpu, uintptr_t ra,
                          void *pv, MemOp memop, uint32_t val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -919,7 +919,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
         return;
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         stl_he_p(pv, val);
@@ -961,7 +961,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
                 return;
             }
         }
-        cpu_loop_exit_atomic(env_cpu(env), ra);
+        cpu_loop_exit_atomic(cpu, ra);
     default:
         g_assert_not_reached();
     }
@@ -975,7 +975,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
 *
 * Store 8 bytes to @p, honoring the atomicity of @memop.
 */
-static void store_atom_8(CPUArchState *env, uintptr_t ra,
+static void store_atom_8(CPUState *cpu, uintptr_t ra,
                          void *pv, MemOp memop, uint64_t val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -986,7 +986,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
         return;
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         stq_he_p(pv, val);
@@ -1029,7 +1029,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
     default:
         g_assert_not_reached();
     }
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }
 
 /**
@@ -1040,7 +1040,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
 *
 * Store 16 bytes to @p, honoring the atomicity of @memop.
 */
-static void store_atom_16(CPUArchState *env, uintptr_t ra,
+static void store_atom_16(CPUState *cpu, uintptr_t ra,
                           void *pv, MemOp memop, Int128 val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -1052,7 +1052,7 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
         return;
     }
 
-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
 
     a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
     b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
@@ -1111,5 +1111,5 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
     default:
         g_assert_not_reached();
     }
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }
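The pattern repeated through these hunks: try progressively cheaper atomic primitives, and when the host cannot honor the required atomicity, either downgrade to byte atomicity if no other vCPU can race, or longjmp out via cpu_loop_exit_atomic() so the block is re-executed exclusively. A loose, runnable model of that decision (the CF_PARALLEL value and the stub types are illustrative assumptions, not QEMU's):

    #include <stdbool.h>
    #include <stdio.h>

    #define CF_PARALLEL 0x0008u   /* illustrative value only */

    typedef struct CPUState {
        unsigned tcg_cflags;
        bool in_exclusive;        /* stand-in for cpu_in_exclusive_context() */
    } CPUState;

    static bool cpu_in_serial_context(CPUState *cs)
    {
        return !(cs->tcg_cflags & CF_PARALLEL) || cs->in_exclusive;
    }

    /* Decide how a 16-byte load proceeds when the host lacks a native
     * 16-byte atomic: plain access if serial, otherwise restart exclusively. */
    static const char *atom16_strategy(CPUState *cs, bool host_has_atomic16)
    {
        if (host_has_atomic16) {
            return "atomic16_read";
        }
        if (cpu_in_serial_context(cs)) {
            return "plain load (nobody can race)";
        }
        return "cpu_loop_exit_atomic (re-execute serially)";
    }

    int main(void)
    {
        CPUState cs = { .tcg_cflags = CF_PARALLEL, .in_exclusive = false };
        printf("%s\n", atom16_strategy(&cs, false));
        return 0;
    }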

View File

@@ -8,6 +8,231 @@
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  */
+
+/*
+ * Load helpers for tcg-ldst.h
+ */
+
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+                        MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+/*
+ * Provide signed versions of the load routines as well.  We can of course
+ * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
+ */
+
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
+}
+
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+                       MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
+}
+
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
+{
+    return helper_ld16_mmu(env, addr, oi, GETPC());
+}
+
+/*
+ * Store helpers for tcg-ldst.h
+ */
+
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    do_st1_mmu(env_cpu(env), addr, val, oi, ra);
+}
+
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+                     MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
+{
+    helper_st16_mmu(env, addr, val, oi, GETPC());
+}
+
+/*
+ * Load helpers for cpu_ldst.h
+ */
+
+static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+{
+    uint8_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
+    ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint16_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint32_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint64_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    Int128 ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+/*
+ * Store helpers for cpu_ldst.h
+ */
+
+static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    helper_stb_mmu(env, addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                  MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+/*
+ * Wrappers of the above
+ */
+
 uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                             int mmu_idx, uintptr_t ra)

View File

@@ -1,7 +1,9 @@
 tcg_ss = ss.source_set()
+common_ss.add(when: 'CONFIG_TCG', if_true: files(
+  'cpu-exec-common.c',
+))
 tcg_ss.add(files(
   'tcg-all.c',
-  'cpu-exec-common.c',
   'cpu-exec.c',
   'tb-maint.c',
   'tcg-runtime-gvec.c',
@@ -20,6 +22,10 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
 specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
   'cputlb.c',
+))
+
+system_ss.add(when: ['CONFIG_TCG'], if_true: files(
+  'icount-common.c',
   'monitor.c',
 ))

View File

@@ -16,7 +16,7 @@
 #include "sysemu/cpu-timers.h"
 #include "sysemu/tcg.h"
 #include "tcg/tcg.h"
-#include "internal.h"
+#include "internal-common.h"
 
 static void dump_drift_info(GString *buf)

View File

@@ -104,7 +104,7 @@ static void gen_empty_udata_cb(void)
     TCGv_ptr udata = tcg_temp_ebb_new_ptr();
 
     tcg_gen_movi_ptr(udata, 0);
-    tcg_gen_ld_i32(cpu_index, cpu_env,
+    tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
     gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
@@ -138,7 +138,7 @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
     tcg_gen_movi_i32(meminfo, info);
     tcg_gen_movi_ptr(udata, 0);
-    tcg_gen_ld_i32(cpu_index, cpu_env,
+    tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
 
     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
@@ -157,7 +157,7 @@ static void gen_empty_mem_helper(void)
     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
 
     tcg_gen_movi_ptr(ptr, 0);
-    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
+    tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
                              offsetof(ArchCPU, env));
     tcg_temp_free_ptr(ptr);
 }
@@ -581,7 +581,7 @@ void plugin_gen_disable_mem_helpers(void)
     if (!tcg_ctx->plugin_tb->mem_helper) {
         return;
     }
-    tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
+    tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
                    offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
 }
@@ -849,7 +849,7 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
     } else {
         if (ptb->vaddr2 == -1) {
             ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
-            get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
+            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
         }
         pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
     }

View File

@@ -29,7 +29,8 @@
 #include "tcg/tcg.h"
 #include "tb-hash.h"
 #include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
 
 /* List iterators for lists of tagged pointers in TranslationBlock. */

View File

@@ -111,14 +111,14 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
      * each vCPU execution. However u16.high can be raised
      * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
      */
-    g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
+    g_assert(cpu->neg.icount_decr.u16.low == 0);
     g_assert(cpu->icount_extra == 0);
 
     replay_mutex_lock();
 
     cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
     insns_left = MIN(0xffff, cpu->icount_budget);
-    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+    cpu->neg.icount_decr.u16.low = insns_left;
     cpu->icount_extra = cpu->icount_budget - insns_left;
 
     if (cpu->icount_budget == 0) {
@@ -138,7 +138,7 @@ void icount_process_data(CPUState *cpu)
     icount_update(cpu);
 
     /* Reset the counters */
-    cpu_neg(cpu)->icount_decr.u16.low = 0;
+    cpu->neg.icount_decr.u16.low = 0;
     cpu->icount_extra = 0;
     cpu->icount_budget = 0;
@@ -153,7 +153,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)
     tcg_handle_interrupt(cpu, mask);
     if (qemu_cpu_is_self(cpu) &&
-        !cpu->can_do_io
+        !cpu->neg.can_do_io
         && (mask & ~old_mask) != 0) {
         cpu_abort(cpu, "Raised interrupt while not in I/O function");
     }
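The budget split visible above is simple arithmetic: only the low 16 bits of icount_decr count down inside generated code, so anything beyond 0xffff instructions is parked in icount_extra and refilled later, and icount_get_executed() (earlier in this series) recovers the executed count as budget minus what remains. A worked example with arbitrary values, as a self-contained sketch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t budget = 100000;                   /* instructions granted */
        uint16_t low    = budget < 0xffff ? (uint16_t)budget : 0xffff;
        uint64_t extra  = budget - low;             /* refill reserve */

        /* After some execution, executed = budget - (low_now + extra_now). */
        printf("icount_decr.u16.low = %" PRIu16 ", icount_extra = %" PRIu64 "\n",
               low, extra);
        return 0;
    }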

View File

@@ -32,7 +32,7 @@
 #include "qemu/guest-random.h"
 #include "exec/exec-all.h"
 #include "hw/boards.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
 
 #include "tcg-accel-ops.h"
 #include "tcg-accel-ops-mttcg.h"
@@ -80,7 +80,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
     cpu_thread_signal_created(cpu);
     qemu_guest_random_seed_thread_part2(cpu->random_seed);

View File

@@ -32,7 +32,7 @@
 #include "qemu/notify.h"
 #include "qemu/guest-random.h"
 #include "exec/exec-all.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
 #include "tcg-accel-ops.h"
 #include "tcg-accel-ops-rr.h"
 #include "tcg-accel-ops-icount.h"
@@ -192,7 +192,7 @@ static void *rr_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     cpu_thread_signal_created(cpu);
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
@@ -334,7 +334,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
         cpu->thread = single_tcg_cpu_thread;
         cpu->halt_cond = single_tcg_halt_cond;
         cpu->thread_id = first_cpu->thread_id;
-        cpu->can_do_io = 1;
+        cpu->neg.can_do_io = 1;
         cpu->created = true;
     }
 }

View File

@@ -91,7 +91,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
     }
 }

View File

@@ -27,7 +27,7 @@
 #include "sysemu/tcg.h"
 #include "exec/replay-core.h"
 #include "sysemu/cpu-timers.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
 #include "tcg/oversized-guest.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
@@ -38,7 +37,7 @@
 #if !defined(CONFIG_USER_ONLY)
 #include "hw/boards.h"
 #endif
-#include "internal.h"
+#include "internal-target.h"
 
 struct TCGState {
     AccelState parent_obj;
@@ -121,7 +121,7 @@ static int tcg_init_machine(MachineState *ms)
      * There's no guest base to take into account, so go ahead and
      * initialize the prologue now.
      */
-    tcg_prologue_init(tcg_ctx);
+    tcg_prologue_init();
 #endif
 
     return 0;
@@ -227,6 +227,8 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
     AccelClass *ac = ACCEL_CLASS(oc);
     ac->name = "tcg";
     ac->init_machine = tcg_init_machine;
+    ac->cpu_common_realize = tcg_exec_realizefn;
+    ac->cpu_common_unrealize = tcg_exec_unrealizefn;
     ac->allowed = &tcg_allowed;
     ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;

View File

@@ -61,7 +61,8 @@
 #include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
 #include "perf.h"
 #include "tcg/insn-start-words.h"
@@ -214,7 +215,7 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
          * Reset the cycle counter to the start of the block and
          * shift if to the number of actually executed instructions.
          */
-        cpu_neg(cpu)->icount_decr.u16.low += insns_left;
+        cpu->neg.icount_decr.u16.low += insns_left;
     }
 
     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
@@ -288,7 +289,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                               vaddr pc, uint64_t cs_base,
                               uint32_t flags, int cflags)
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb, *existing_tb;
     tb_page_addr_t phys_pc, phys_p2;
     tcg_insn_unit *gen_code_buf;
@@ -344,8 +345,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_ctx->page_bits = TARGET_PAGE_BITS;
     tcg_ctx->page_mask = TARGET_PAGE_MASK;
     tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
-    tcg_ctx->tlb_fast_offset =
-        (int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
 #endif
     tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
 #ifdef TCG_GUEST_DEFAULT_MO
@@ -580,7 +579,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
     } else {
         /* The exception probably happened in a helper.  The CPU state should
            have been saved before calling it. Fetch the PC from there.  */
-        CPUArchState *env = cpu->env_ptr;
+        CPUArchState *env = cpu_env(cpu);
         vaddr pc;
         uint64_t cs_base;
         tb_page_addr_t addr;
@@ -623,7 +622,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cc = CPU_GET_CLASS(cpu);
     if (cc->tcg_ops->io_recompile_replay_branch &&
         cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
-        cpu_neg(cpu)->icount_decr.u16.low++;
+        cpu->neg.icount_decr.u16.low++;
         n = 2;
     }
@@ -779,7 +778,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
     cpu->interrupt_request |= mask;
-    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
 
 #endif /* CONFIG_USER_ONLY */


@@ -14,15 +14,17 @@
 #include "exec/translator.h"
 #include "exec/plugin-gen.h"
 #include "tcg/tcg-op-common.h"
-#include "internal.h"
+#include "internal-target.h"
 
 static void set_can_do_io(DisasContextBase *db, bool val)
 {
     if (db->saved_can_do_io != val) {
         db->saved_can_do_io = val;
-        tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
-                       offsetof(ArchCPU, parent_obj.can_do_io) -
-                       offsetof(ArchCPU, env));
+
+        QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
+        tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
+                        offsetof(ArchCPU, parent_obj.neg.can_do_io) -
+                        offsetof(ArchCPU, env));
     }
 }
@@ -47,9 +49,9 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
     if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
         count = tcg_temp_new_i32();
-        tcg_gen_ld_i32(count, cpu_env,
-                       offsetof(ArchCPU, neg.icount_decr.u32) -
-                       offsetof(ArchCPU, env));
+        tcg_gen_ld_i32(count, tcg_env,
+                       offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
+                       - offsetof(ArchCPU, env));
     }
 
     if (cflags & CF_USE_ICOUNT) {
@@ -77,13 +79,13 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
     }
 
     if (cflags & CF_USE_ICOUNT) {
-        tcg_gen_st16_i32(count, cpu_env,
-                         offsetof(ArchCPU, neg.icount_decr.u16.low) -
-                         offsetof(ArchCPU, env));
+        tcg_gen_st16_i32(count, tcg_env,
+                         offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
+                         - offsetof(ArchCPU, env));
     }
 
     /*
-     * cpu->can_do_io is set automatically here at the beginning of
+     * cpu->neg.can_do_io is set automatically here at the beginning of
      * each translation block.  The cost is minimal, plus it would be
      * very easy to forget doing it in the translator.
      */
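Each store/load above computes offsetof(ArchCPU, parent_obj.neg....) - offsetof(ArchCPU, env), which is negative by construction: neg sits at the very end of CPUState, and env follows CPUState immediately inside ArchCPU. A self-contained sketch of that arithmetic with stand-in types (not the real ArchCPU):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { char tlb_and_icount[64]; } Neg;   /* stand-in for CPUNegativeOffsetState */
    typedef struct { char other[4096]; Neg neg; } CPU; /* stand-in for CPUState, neg last */
    typedef struct { char regs[512]; } Env;            /* stand-in for CPUArchState */
    typedef struct { CPU parent_obj; Env env; } Arch;  /* stand-in for ArchCPU */

    int main(void)
    {
        /* Same expression shape as in the diff: a small negative constant. */
        ptrdiff_t off = (ptrdiff_t)offsetof(Arch, parent_obj.neg)
                      - (ptrdiff_t)offsetof(Arch, env);
        printf("neg is at env%+td\n", off);   /* prints a negative offset */
        return 0;
    }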


@@ -29,7 +29,8 @@
 #include "qemu/atomic128.h"
 #include "trace/trace-root.h"
 #include "tcg/tcg-ldst.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
 
 __thread uintptr_t helper_retaddr;
@@ -941,7 +942,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
 
 /* The softmmu versions of these helpers are in cputlb.c.  */
 
-static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
+static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
                             MemOp mop, uintptr_t ra, MMUAccessType type)
 {
     int a_bits = get_alignment_bits(mop);
@@ -949,60 +950,39 @@ static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
 
     /* Enforce guest required alignment.  */
     if (unlikely(addr & ((1 << a_bits) - 1))) {
-        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
+        cpu_loop_exit_sigbus(cpu, addr, type, ra);
     }
 
-    ret = g2h(env_cpu(env), addr);
+    ret = g2h(cpu, addr);
     set_helper_retaddr(ra);
     return ret;
 }
 
 #include "ldst_atomicity.c.inc"
 
-static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
-                          MemOp mop, uintptr_t ra)
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                          uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint8_t ret;
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_8);
     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
+    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
     return ret;
 }
 
-tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
-                                 MemOpIdx oi, uintptr_t ra)
-{
-    return do_ld1_mmu(env, addr, get_memop(oi), ra);
-}
-
-tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
-                                 MemOpIdx oi, uintptr_t ra)
-{
-    return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
-                           MemOp mop, uintptr_t ra)
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint16_t ret;
+    MemOp mop = get_memop(oi);
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_16);
     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
-    ret = load_atom_2(env, ra, haddr, mop);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_2(cpu, ra, haddr, mop);
     clear_helper_retaddr();
 
     if (mop & MO_BSWAP) {
@@ -1011,36 +991,16 @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
     return ret;
 }
 
-tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
-                                 MemOpIdx oi, uintptr_t ra)
-{
-    return do_ld2_mmu(env, addr, get_memop(oi), ra);
-}
-
-tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
-                                 MemOpIdx oi, uintptr_t ra)
-{
-    return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
-                     MemOpIdx oi, uintptr_t ra)
-{
-    uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
-                           MemOp mop, uintptr_t ra)
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint32_t ret;
+    MemOp mop = get_memop(oi);
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_32);
     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
-    ret = load_atom_4(env, ra, haddr, mop);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_4(cpu, ra, haddr, mop);
     clear_helper_retaddr();
 
     if (mop & MO_BSWAP) {
@@ -1049,36 +1009,16 @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
     return ret;
 }
 
-tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
-                                 MemOpIdx oi, uintptr_t ra)
-{
-    return do_ld4_mmu(env, addr, get_memop(oi), ra);
-}
-
-tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
-                                 MemOpIdx oi, uintptr_t ra)
-{
-    return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
-                     MemOpIdx oi, uintptr_t ra)
-{
-    uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
-                           MemOp mop, uintptr_t ra)
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint64_t ret;
+    MemOp mop = get_memop(oi);
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_64);
     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
-    ret = load_atom_8(env, ra, haddr, mop);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_8(cpu, ra, haddr, mop);
     clear_helper_retaddr();
 
     if (mop & MO_BSWAP) {
@@ -1087,30 +1027,17 @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
     return ret;
 }
 
-uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    return do_ld8_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
-                     MemOpIdx oi, uintptr_t ra)
-{
-    uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
-                          MemOp mop, uintptr_t ra)
+static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
     Int128 ret;
+    MemOp mop = get_memop(oi);
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
-    ret = load_atom_16(env, ra, haddr, mop);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
+    ret = load_atom_16(cpu, ra, haddr, mop);
     clear_helper_retaddr();
 
     if (mop & MO_BSWAP) {
@@ -1119,171 +1046,81 @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
     return ret;
 }
 
-Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
-                       MemOpIdx oi, uintptr_t ra)
-{
-    return do_ld16_mmu(env, addr, get_memop(oi), ra);
-}
-
-Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
-{
-    return helper_ld16_mmu(env, addr, oi, GETPC());
-}
-
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
-                       MemOp mop, uintptr_t ra)
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_8);
     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     clear_helper_retaddr();
 }
 
-void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    do_st1_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
-                 MemOpIdx oi, uintptr_t ra)
-{
-    do_st1_mmu(env, addr, val, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
-                       MemOp mop, uintptr_t ra)
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOp mop = get_memop(oi);
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_16);
     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
         val = bswap16(val);
     }
-    store_atom_2(env, ra, haddr, mop, val);
+    store_atom_2(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
 }
 
-void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    do_st2_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
-                 MemOpIdx oi, uintptr_t ra)
-{
-    do_st2_mmu(env, addr, val, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
-                       MemOp mop, uintptr_t ra)
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOp mop = get_memop(oi);
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_32);
     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
         val = bswap32(val);
     }
-    store_atom_4(env, ra, haddr, mop, val);
+    store_atom_4(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
 }
 
-void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    do_st4_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
-                 MemOpIdx oi, uintptr_t ra)
-{
-    do_st4_mmu(env, addr, val, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
-                       MemOp mop, uintptr_t ra)
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOp mop = get_memop(oi);
 
-    tcg_debug_assert((mop & MO_SIZE) == MO_64);
     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
         val = bswap64(val);
     }
-    store_atom_8(env, ra, haddr, mop, val);
+    store_atom_8(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
 }
 
-void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    do_st8_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
-                 MemOpIdx oi, uintptr_t ra)
-{
-    do_st8_mmu(env, addr, val, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
-                        MemOp mop, uintptr_t ra)
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
+                        MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOpIdx mop = get_memop(oi);
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
-    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
         val = bswap128(val);
     }
-    store_atom_16(env, ra, haddr, mop, val);
+    store_atom_16(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
 }
 
-void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
-                     MemOpIdx oi, uintptr_t ra)
-{
-    do_st16_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
-{
-    helper_st16_mmu(env, addr, val, oi, GETPC());
-}
-
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
-                  Int128 val, MemOpIdx oi, uintptr_t ra)
-{
-    do_st16_mmu(env, addr, val, get_memop(oi), ra);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
 {
     uint32_t ret;
@@ -1330,7 +1167,7 @@ uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
     void *haddr;
     uint8_t ret;
 
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
     return ret;
@@ -1342,7 +1179,7 @@ uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
     void *haddr;
     uint16_t ret;
 
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
     ret = lduw_p(haddr);
     clear_helper_retaddr();
     if (get_memop(oi) & MO_BSWAP) {
@@ -1357,7 +1194,7 @@ uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
     void *haddr;
     uint32_t ret;
 
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
     ret = ldl_p(haddr);
     clear_helper_retaddr();
     if (get_memop(oi) & MO_BSWAP) {
@@ -1372,7 +1209,7 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
     void *haddr;
     uint64_t ret;
 
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
     ret = ldq_p(haddr);
     clear_helper_retaddr();
     if (get_memop(oi) & MO_BSWAP) {
@@ -1386,7 +1223,7 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
 /*
  * Do not allow unaligned operations to proceed.  Return the host address.
  */
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                                int size, uintptr_t retaddr)
 {
     MemOp mop = get_memop(oi);
@@ -1395,15 +1232,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
 
     /* Enforce guest required alignment.  */
     if (unlikely(addr & ((1 << a_bits) - 1))) {
-        cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
+        cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
     }
 
     /* Enforce qemu required alignment.  */
     if (unlikely(addr & (size - 1))) {
-        cpu_loop_exit_atomic(env_cpu(env), retaddr);
+        cpu_loop_exit_atomic(cpu, retaddr);
     }
 
-    ret = g2h(env_cpu(env), addr);
+    ret = g2h(cpu, addr);
     set_helper_retaddr(retaddr);
     return ret;
 }
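The helper_* and cpu_*_mmu entry points deleted above were not lost: per the merge summary they were unified into accel/tcg/ldst_common.c.inc, which is included by both the user-mode and softmmu translation units and can therefore reach the static do_*_mmu cores. A hedged sketch of what such a shared wrapper looks like (the exact bodies in ldst_common.c.inc may differ):

    uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
    {
        uint16_t ret;

        tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
        ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
        /* plugin instrumentation stays in the wrapper, off the core path */
        qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
        return ret;
    }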


@@ -32,9 +32,6 @@ int bsd_get_ncpu(void);
 /* exit(2) */
 static inline abi_long do_bsd_exit(void *cpu_env, abi_long arg1)
 {
-#ifdef TARGET_GPROF
-    _mcleanup();
-#endif
     gdb_exit(arg1);
     qemu_plugin_user_exit();
     _exit(arg1);


@@ -36,7 +36,7 @@
 #include "qemu/help_option.h"
 #include "qemu/module.h"
 #include "exec/exec-all.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
 #include "qemu/timer.h"
 #include "qemu/envlist.h"
 #include "qemu/cutils.h"
@@ -462,7 +462,7 @@ int main(int argc, char **argv)
         ac->init_machine(NULL);
     }
     cpu = cpu_create(cpu_type);
-    env = cpu->env_ptr;
+    env = cpu_env(cpu);
     cpu_reset(cpu);
     thread_cpu = cpu;
@@ -586,7 +586,7 @@ int main(int argc, char **argv)
      * generating the prologue until now so that the prologue can take
      * the real value of GUEST_BASE into account.
      */
-    tcg_prologue_init(tcg_ctx);
+    tcg_prologue_init();
 
     target_cpu_init(env, regs);


@@ -357,8 +357,8 @@ static int core_dump_signal(int sig)
 static G_NORETURN
 void dump_core_and_abort(int target_sig)
 {
-    CPUArchState *env = thread_cpu->env_ptr;
-    CPUState *cpu = env_cpu(env);
+    CPUState *cpu = thread_cpu;
+    CPUArchState *env = cpu_env(cpu);
     TaskState *ts = cpu->opaque;
     int core_dumped = 0;
     int host_sig;
@@ -463,7 +463,7 @@ static int fatal_signal(int sig)
 void force_sig_fault(int sig, int code, abi_ulong addr)
 {
     CPUState *cpu = thread_cpu;
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     target_siginfo_t info = {};
 
     info.si_signo = sig;
@@ -475,8 +475,7 @@ void force_sig_fault(int sig, int code, abi_ulong addr)
 
 static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
 {
-    CPUArchState *env = thread_cpu->env_ptr;
-    CPUState *cpu = env_cpu(env);
+    CPUState *cpu = thread_cpu;
     TaskState *ts = cpu->opaque;
     target_siginfo_t tinfo;
     ucontext_t *uc = puc;
@@ -854,11 +853,6 @@ void signal_init(void)
     act.sa_flags = SA_SIGINFO;
 
     for (i = 1; i <= TARGET_NSIG; i++) {
-#ifdef CONFIG_GPROF
-        if (i == TARGET_SIGPROF) {
-            continue;
-        }
-#endif
         host_sig = target_to_host_signal(i);
         sigaction(host_sig, NULL, &oact);
         if (oact.sa_sigaction == (void *)SIG_IGN) {


@@ -136,15 +136,10 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
     /* cache the cpu class for the hotpath */
     cpu->cc = CPU_GET_CLASS(cpu);
 
-    if (!accel_cpu_realizefn(cpu, errp)) {
+    if (!accel_cpu_common_realize(cpu, errp)) {
         return;
     }
 
-    /* NB: errp parameter is unused currently */
-    if (tcg_enabled()) {
-        tcg_exec_realizefn(cpu, errp);
-    }
-
     /* Wait until cpu initialization complete before exposing cpu. */
     cpu_list_add(cpu);
 
@@ -187,11 +182,9 @@ void cpu_exec_unrealizefn(CPUState *cpu)
     cpu_list_remove(cpu);
     /*
      * Now that the vCPU has been removed from the RCU list, we can call
-     * tcg_exec_unrealizefn, which may free fields using call_rcu.
+     * accel_cpu_common_unrealize, which may free fields using call_rcu.
      */
-    if (tcg_enabled()) {
-        tcg_exec_unrealizefn(cpu);
-    }
+    accel_cpu_common_unrealize(cpu);
 }
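The accel_cpu_common_realize() call that replaces the tcg_enabled() special case dispatches through the active accelerator's class. A hedged sketch of the dispatcher's shape (the real one lives in the renamed accel/accel-target.c):

    bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
    {
        CPUClass *cc = CPU_GET_CLASS(cpu);
        AccelState *accel = current_accel();
        AccelClass *acc = ACCEL_GET_CLASS(accel);

        /* target-specific realization first ... */
        if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize &&
            !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
            return false;
        }
        /* ... then the accelerator-wide hook, e.g. tcg_exec_realizefn() */
        if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
            return false;
        }
        return true;
    }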


@@ -20,20 +20,6 @@ they were first deprecated in the 2.10.0 release.
 What follows is a list of all features currently marked as
 deprecated.
 
-Build options
--------------
-
-``gprof`` builds (since 8.0)
-''''''''''''''''''''''''''''
-
-The ``--enable-gprof`` configure setting relies on compiler
-instrumentation to gather its data which can distort the generated
-profile.  As other non-instrumenting tools are available that give a
-more holistic view of the system with non-instrumented binaries we are
-deprecating the build option and no longer defend it in CI.  The
-``--enable-gcov`` build option remains for analysis test case
-coverage.
-
 System emulator command line arguments
 --------------------------------------


@@ -423,7 +423,7 @@ static const char *get_feature_xml(const char *p, const char **newp,
 static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     GDBRegisterState *r;
 
     if (reg < cc->gdb_num_core_regs) {
@@ -441,7 +441,7 @@ static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     GDBRegisterState *r;
 
     if (reg < cc->gdb_num_core_regs) {

@@ -310,7 +310,7 @@ void gdb_handle_v_file_open(GArray *params, void *user_ctx)
     uint64_t mode = get_param(params, 2)->val_ull;
 
 #ifdef CONFIG_LINUX
-    int fd = do_guest_openat(gdbserver_state.g_cpu->env_ptr, 0, filename,
+    int fd = do_guest_openat(cpu_env(gdbserver_state.g_cpu), 0, filename,
                              flags, mode, false);
 #else
     int fd = open(filename, flags, mode);


@@ -86,7 +86,7 @@ void cpu_exit(CPUState *cpu)
     qatomic_set(&cpu->exit_request, 1);
     /* Ensure cpu_exec will see the exit request after TCG has exited. */
     smp_wmb();
-    qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
+    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
 
 static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
@@ -130,8 +130,8 @@ static void cpu_common_reset_hold(Object *obj)
     cpu->halted = cpu->start_powered_off;
     cpu->mem_io_pc = 0;
     cpu->icount_extra = 0;
-    qatomic_set(&cpu->icount_decr_ptr->u32, 0);
-    cpu->can_do_io = 1;
+    qatomic_set(&cpu->neg.icount_decr.u32, 0);
+    cpu->neg.can_do_io = true;
     cpu->exception_index = -1;
     cpu->crash_occurred = false;
     cpu->cflags_next_tb = -1;


@@ -66,7 +66,7 @@ struct pvclock_vcpu_time_info {
 static uint64_t kvmclock_current_nsec(KVMClockState *s)
 {
     CPUState *cpu = first_cpu;
-    CPUX86State *env = cpu->env_ptr;
+    CPUX86State *env = cpu_env(cpu);
     hwaddr kvmclock_struct_pa;
     uint64_t migration_tsc = env->tsc;
     struct pvclock_vcpu_time_info time;


@@ -423,7 +423,7 @@ static void mips_gic_realize(DeviceState *dev, Error **errp)
     /* Register the env for all VPs with the GIC */
     for (i = 0; i < s->num_vps; i++) {
         if (cs != NULL) {
-            s->vps[i].env = cs->env_ptr;
+            s->vps[i].env = cpu_env(cs);
             cs = CPU_NEXT(cs);
         } else {
             error_setg(errp,


@@ -131,7 +131,7 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
         size_t hartid = mtimer->hartid_base +
                         ((addr - mtimer->timecmp_base) >> 3);
         CPUState *cpu = cpu_by_arch_id(hartid);
-        CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+        CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
         if (!env) {
             qemu_log_mask(LOG_GUEST_ERROR,
                           "aclint-mtimer: invalid hartid: %zu", hartid);
@@ -174,7 +174,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
         size_t hartid = mtimer->hartid_base +
                         ((addr - mtimer->timecmp_base) >> 3);
         CPUState *cpu = cpu_by_arch_id(hartid);
-        CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+        CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
         if (!env) {
             qemu_log_mask(LOG_GUEST_ERROR,
                           "aclint-mtimer: invalid hartid: %zu", hartid);
@@ -233,7 +233,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
     /* Check if timer interrupt is triggered for each hart. */
     for (i = 0; i < mtimer->num_harts; i++) {
         CPUState *cpu = cpu_by_arch_id(mtimer->hartid_base + i);
-        CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+        CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
         if (!env) {
             continue;
         }
@@ -375,7 +375,7 @@ DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size,
     for (i = 0; i < num_harts; i++) {
         CPUState *cpu = cpu_by_arch_id(hartid_base + i);
         RISCVCPU *rvcpu = RISCV_CPU(cpu);
-        CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+        CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
         riscv_aclint_mtimer_callback *cb =
             g_new0(riscv_aclint_mtimer_callback, 1);
 
@@ -409,7 +409,7 @@ static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr,
     if (addr < (swi->num_harts << 2)) {
         size_t hartid = swi->hartid_base + (addr >> 2);
         CPUState *cpu = cpu_by_arch_id(hartid);
-        CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+        CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
         if (!env) {
             qemu_log_mask(LOG_GUEST_ERROR,
                           "aclint-swi: invalid hartid: %zu", hartid);
@@ -432,7 +432,7 @@ static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value,
     if (addr < (swi->num_harts << 2)) {
         size_t hartid = swi->hartid_base + (addr >> 2);
         CPUState *cpu = cpu_by_arch_id(hartid);
-        CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+        CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
         if (!env) {
             qemu_log_mask(LOG_GUEST_ERROR,
                           "aclint-swi: invalid hartid: %zu", hartid);


@@ -333,7 +333,7 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
     RISCVIMSICState *imsic = RISCV_IMSIC(dev);
     RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid));
     CPUState *cpu = cpu_by_arch_id(imsic->hartid);
-    CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+    CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
 
     if (!kvm_irqchip_in_kernel()) {
         imsic->num_eistate = imsic->num_pages * imsic->num_irqs;


@@ -373,7 +373,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
     MachineState *machine = MACHINE(pms);
     unsigned int smp_cpus = machine->smp.cpus;
     const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms);
-    CPUPPCState *env = first_cpu->env_ptr;
+    CPUPPCState *env = cpu_env(first_cpu);
     int ret = -1;
     uint64_t mem_reg_property[] = { 0, cpu_to_be64(machine->ram_size) };
     int fdt_size;
@@ -499,7 +499,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
         if (cpu == NULL) {
             continue;
         }
-        env = cpu->env_ptr;
+        env = cpu_env(cpu);
 
         cpu_name = g_strdup_printf("/cpus/PowerPC,8544@%x", i);
         qemu_fdt_add_subnode(fdt, cpu_name);


@@ -1125,7 +1125,7 @@ static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
          * Older KVM versions with older guest kernels were broken
          * with the magic page, don't allow the guest to map it.
          */
-        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
+        if (!kvmppc_get_hypercall(cpu_env(first_cpu), hypercall,
                                   sizeof(hypercall))) {
             _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                              hypercall, sizeof(hypercall)));


@@ -26,13 +26,6 @@
 #include "hw/core/cpu.h"
 #include "qemu/rcu.h"
 
-#define EXCP_INTERRUPT  0x10000 /* async interruption */
-#define EXCP_HLT        0x10001 /* hlt instruction reached */
-#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
-#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
-#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
-#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */
-
 /* some important defines:
  *
  * HOST_BIG_ENDIAN : whether the host cpu is big endian and
@@ -413,29 +406,14 @@ static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
     return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
 }
 
-#ifdef CONFIG_TCG
-/* accel/tcg/translate-all.c */
-void dump_exec_info(GString *buf);
-#endif /* CONFIG_TCG */
-
 #endif /* !CONFIG_USER_ONLY */
 
 /* accel/tcg/cpu-exec.c */
 int cpu_exec(CPUState *cpu);
-void tcg_exec_realizefn(CPUState *cpu, Error **errp);
-void tcg_exec_unrealizefn(CPUState *cpu);
 
-/**
- * cpu_set_cpustate_pointers(cpu)
- * @cpu: The cpu object
- *
- * Set the generic pointers in CPUState into the outer object.
- */
-static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
-{
-    cpu->parent_obj.env_ptr = &cpu->env;
-    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
-}
+/* Validate correct placement of CPUArchState. */
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
 
 /**
  * env_archcpu(env)
@@ -445,7 +423,7 @@ static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
  */
 static inline ArchCPU *env_archcpu(CPUArchState *env)
 {
-    return container_of(env, ArchCPU, env);
+    return (void *)env - sizeof(CPUState);
 }
 
 /**
@@ -456,42 +434,7 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
  */
 static inline CPUState *env_cpu(CPUArchState *env)
 {
-    return &env_archcpu(env)->parent_obj;
+    return (void *)env - sizeof(CPUState);
 }
 
-/**
- * env_neg(env)
- * @env: The architecture environment
- *
- * Return the CPUNegativeOffsetState associated with the environment.
- */
-static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
-{
-    ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
-    return &arch_cpu->neg;
-}
-
-/**
- * cpu_neg(cpu)
- * @cpu: The generic CPUState
- *
- * Return the CPUNegativeOffsetState associated with the cpu.
- */
-static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
-{
-    ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
-    return &arch_cpu->neg;
-}
-
-/**
- * env_tlb(env)
- * @env: The architecture environment
- *
- * Return the CPUTLB state associated with the environment.
- */
-static inline CPUTLB *env_tlb(CPUArchState *env)
-{
-    return &env_neg(env)->tlb;
-}
-
 #endif /* CPU_ALL_H */
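The pointer arithmetic above only works because of the layout contract the two build assertions enforce: the CPUState base is at offset 0 of ArchCPU and env follows it immediately, so both conversions reduce to a single subtraction. A compilable sketch of the invariant with stand-in types (not QEMU's):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { long generic[32]; } FakeCPUState;
    typedef struct { long regs[64]; } FakeCPUArchState;
    typedef struct {
        FakeCPUState parent_obj;        /* must be at offset 0 */
        FakeCPUArchState env;           /* must follow immediately */
    } FakeArchCPU;

    static FakeCPUState *fake_env_cpu(FakeCPUArchState *env)
    {
        return (FakeCPUState *)((char *)env - sizeof(FakeCPUState));
    }

    int main(void)
    {
        FakeArchCPU cpu;
        assert(offsetof(FakeArchCPU, parent_obj) == 0);
        assert(offsetof(FakeArchCPU, env) == sizeof(FakeCPUState));
        assert(fake_env_cpu(&cpu.env) == &cpu.parent_obj);
        return 0;
    }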


@@ -7,6 +7,13 @@
 #include "exec/hwaddr.h"
 #endif
 
+#define EXCP_INTERRUPT  0x10000 /* async interruption */
+#define EXCP_HLT        0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */
+
 /**
  * vaddr:
  * Type wide enough to contain any #target_ulong virtual address.
@@ -166,4 +173,36 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
 /* vl.c */
 void list_cpus(void);
 
+#ifdef CONFIG_TCG
+/**
+ * cpu_unwind_state_data:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @data: output data
+ *
+ * Attempt to load the unwind state for a host pc occurring in
+ * translated code.  If @host_pc is not in translated code, the
+ * function returns false; otherwise @data is loaded.
+ * This is the same unwind info as given to restore_state_to_opc.
+ */
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code.  If @host_pc is not in translated code no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
+
+G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#endif /* CONFIG_TCG */
+
+G_NORETURN void cpu_loop_exit(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
+
 #endif /* CPU_COMMON_H */
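A hedged sketch of how these declarations are typically used together on a fault path (shape only; the real callers are the per-target fault handlers, and the exception code below is a placeholder):

    /* Illustrative only: resynchronize guest state after a host-level
     * fault inside translated code, then leave the cpu loop. */
    static void fault_path_sketch(CPUState *cpu, uintptr_t host_pc)
    {
        if (cpu_restore_state(cpu, host_pc)) {
            /* pc and flags now describe the faulting guest instruction */
        }
        cpu->exception_index = EXCP_DEBUG;  /* placeholder exception */
        cpu_loop_exit(cpu);
    }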


@@ -54,18 +54,7 @@
 
 #include "exec/target_long.h"
 
-/*
- * Fix the number of mmu modes to 16, which is also the maximum
- * supported by the softmmu tlb api.
- */
-#define NB_MMU_MODES 16
-
 #if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-#include "exec/tlb-common.h"
-
-/* use a fully associative victim tlb of 8 entries */
-#define CPU_VTLB_SIZE 8
-
 #define CPU_TLB_DYN_MIN_BITS 6
 #define CPU_TLB_DYN_DEFAULT_BITS 8
 
@@ -91,131 +80,4 @@
 
 #endif /* CONFIG_SOFTMMU && CONFIG_TCG */
 
-#if defined(CONFIG_SOFTMMU)
-/*
- * The full TLB entry, which is not accessed by generated TCG code,
- * so the layout is not as critical as that of CPUTLBEntry.  This is
- * also why we don't want to combine the two structs.
- */
-typedef struct CPUTLBEntryFull {
-    /*
-     * @xlat_section contains:
-     *  - For ram, an offset which must be added to the virtual address
-     *    to obtain the ram_addr_t of the target RAM
-     *  - For other memory regions,
-     *    + in the lower TARGET_PAGE_BITS, the physical section number
-     *    + with the TARGET_PAGE_BITS masked off, the offset within
-     *      the target MemoryRegion
-     */
-    hwaddr xlat_section;
-
-    /*
-     * @phys_addr contains the physical address in the address space
-     * given by cpu_asidx_from_attrs(cpu, @attrs).
-     */
-    hwaddr phys_addr;
-
-    /* @attrs contains the memory transaction attributes for the page. */
-    MemTxAttrs attrs;
-
-    /* @prot contains the complete protections for the page. */
-    uint8_t prot;
-
-    /* @lg_page_size contains the log2 of the page size. */
-    uint8_t lg_page_size;
-
-    /*
-     * Additional tlb flags for use by the slow path. If non-zero,
-     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
-     */
-    uint8_t slow_flags[MMU_ACCESS_COUNT];
-
-    /*
-     * Allow target-specific additions to this structure.
-     * This may be used to cache items from the guest cpu
-     * page tables for later use by the implementation.
-     */
-#ifdef TARGET_PAGE_ENTRY_EXTRA
-    TARGET_PAGE_ENTRY_EXTRA
-#endif
-} CPUTLBEntryFull;
-#endif /* CONFIG_SOFTMMU */
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-/*
- * Data elements that are per MMU mode, minus the bits accessed by
- * the TCG fast path.
- */
-typedef struct CPUTLBDesc {
-    /*
-     * Describe a region covering all of the large pages allocated
-     * into the tlb.  When any page within this region is flushed,
-     * we must flush the entire tlb.  The region is matched if
-     * (addr & large_page_mask) == large_page_addr.
-     */
-    vaddr large_page_addr;
-    vaddr large_page_mask;
-    /* host time (in ns) at the beginning of the time window */
-    int64_t window_begin_ns;
-    /* maximum number of entries observed in the window */
-    size_t window_max_entries;
-    size_t n_used_entries;
-    /* The next index to use in the tlb victim table.  */
-    size_t vindex;
-    /* The tlb victim table, in two parts.  */
-    CPUTLBEntry vtable[CPU_VTLB_SIZE];
-    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
-    CPUTLBEntryFull *fulltlb;
-} CPUTLBDesc;
-
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
-    /* Serialize updates to f.table and d.vtable, and others as noted. */
-    QemuSpin lock;
-    /*
-     * Within dirty, for each bit N, modifications have been made to
-     * mmu_idx N since the last time that mmu_idx was flushed.
-     * Protected by tlb_c.lock.
-     */
-    uint16_t dirty;
-    /*
-     * Statistics.  These are not lock protected, but are read and
-     * written atomically.  This allows the monitor to print a snapshot
-     * of the stats without interfering with the cpu.
-     */
-    size_t full_flush_count;
-    size_t part_flush_count;
-    size_t elide_flush_count;
-} CPUTLBCommon;
-
-/*
- * The entire softmmu tlb, for all MMU modes.
- * The meaning of each of the MMU modes is defined in the target code.
- * Since this is placed within CPUNegativeOffsetState, the smallest
- * negative offsets are at the end of the struct.
- */
-typedef struct CPUTLB {
-    CPUTLBCommon c;
-    CPUTLBDesc d[NB_MMU_MODES];
-    CPUTLBDescFast f[NB_MMU_MODES];
-} CPUTLB;
-
-#else
-
-typedef struct CPUTLB { } CPUTLB;
-
-#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
-
-/*
- * This structure must be placed in ArchCPU immediately
- * before CPUArchState, as a field named "neg".
- */
-typedef struct CPUNegativeOffsetState {
-    CPUTLB tlb;
-    IcountDecr icount_decr;
-} CPUNegativeOffsetState;
-
 #endif


@@ -361,19 +361,19 @@ static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
 }
 
 /* Find the TLB index corresponding to the mmu_idx + address pair. */
-static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
+static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
                                   vaddr addr)
 {
-    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+    uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
 
     return (addr >> TARGET_PAGE_BITS) & size_mask;
 }
 
 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
-static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
+static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
                                      vaddr addr)
 {
-    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
+    return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
 }
 
 #endif /* defined(CONFIG_USER_ONLY) */
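A hedged sketch of how a slow-path helper might use these accessors to probe the fast TLB before falling back (simplified; the real probe logic in cputlb.c also consults the victim TLB and the per-entry flag bits):

    static bool tlb_probe_sketch(CPUState *cpu, int mmu_idx, vaddr addr,
                                 uintptr_t *host_addend)
    {
        CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);

        if (tlb_hit(tlb_addr_write(entry), addr)) {
            *host_addend = entry->addend;   /* host address = addr + addend */
            return true;
        }
        return false;                       /* fall back to tlb_fill */
    }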


@@ -27,36 +27,6 @@
 #include "exec/translation-block.h"
 #include "qemu/clang-tsa.h"
 
-/**
- * cpu_unwind_state_data:
- * @cpu: the cpu context
- * @host_pc: the host pc within the translation
- * @data: output data
- *
- * Attempt to load the the unwind state for a host pc occurring in
- * translated code.  If @host_pc is not in translated code, the
- * function returns false; otherwise @data is loaded.
- * This is the same unwind info as given to restore_state_to_opc.
- */
-bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
-
-/**
- * cpu_restore_state:
- * @cpu: the cpu context
- * @host_pc: the host pc within the translation
- * @return: true if state was restored, false otherwise
- *
- * Attempt to restore the state for a fault occurring in translated
- * code.  If @host_pc is not in translated code no state is
- * restored and the function returns false.
- */
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
-
-G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
-G_NORETURN void cpu_loop_exit(CPUState *cpu);
-G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
-G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
-
 /**
  * cpu_loop_exit_requested:
  * @cpu: The CPU state to be tested
@@ -71,7 +41,7 @@ G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
  */
 static inline bool cpu_loop_exit_requested(CPUState *cpu)
 {
-    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
 }
 
 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)


@@ -32,7 +32,7 @@ typedef struct AccelCPUClass {
 
     void (*cpu_class_init)(CPUClass *cc);
     void (*cpu_instance_init)(CPUState *cpu);
-    bool (*cpu_realizefn)(CPUState *cpu, Error **errp);
+    bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
 } AccelCPUClass;
 
 #endif /* ACCEL_CPU_H */


@@ -25,6 +25,7 @@
 #include "exec/cpu-common.h"
 #include "exec/hwaddr.h"
 #include "exec/memattrs.h"
+#include "exec/tlb-common.h"
 #include "qapi/qapi-types-run-state.h"
 #include "qemu/bitmap.h"
 #include "qemu/rcu_queue.h"
@@ -192,6 +193,137 @@ struct CPUClass {
     bool gdb_stop_before_watchpoint;
 };
 
+/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
+
+/* Use a fully associative victim tlb of 8 entries. */
+#define CPU_VTLB_SIZE 8
+
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry.  This is
+ * also why we don't want to combine the two structs.
+ */
+typedef struct CPUTLBEntryFull {
+    /*
+     * @xlat_section contains:
+     *  - in the lower TARGET_PAGE_BITS, a physical section number
+     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
+     *    must be added to the virtual address to obtain:
+     *     + the ram_addr_t of the target RAM (if the physical section
+     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+     *     + the offset within the target MemoryRegion (otherwise)
+     */
+    hwaddr xlat_section;
+
+    /*
+     * @phys_addr contains the physical address in the address space
+     * given by cpu_asidx_from_attrs(cpu, @attrs).
+     */
+    hwaddr phys_addr;
+
+    /* @attrs contains the memory transaction attributes for the page. */
+    MemTxAttrs attrs;
+
+    /* @prot contains the complete protections for the page. */
+    uint8_t prot;
+
+    /* @lg_page_size contains the log2 of the page size. */
+    uint8_t lg_page_size;
+
+    /*
+     * Additional tlb flags for use by the slow path. If non-zero,
+     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+     */
+    uint8_t slow_flags[MMU_ACCESS_COUNT];
+
+    /*
+     * Allow target-specific additions to this structure.
+     * This may be used to cache items from the guest cpu
+     * page tables for later use by the implementation.
+     */
+    union {
+        /*
+         * Cache the attrs and shareability fields from the page table entry.
+         *
+         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+         * For shareability and guarded, as in the SH and GP fields respectively
+         * of the VMSAv8-64 PTEs.
+         */
+        struct {
+            uint8_t pte_attrs;
+            uint8_t shareability;
+            bool guarded;
+        } arm;
+    } extra;
+} CPUTLBEntryFull;
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+    /*
+     * Describe a region covering all of the large pages allocated
+     * into the tlb.  When any page within this region is flushed,
+     * we must flush the entire tlb.  The region is matched if
+     * (addr & large_page_mask) == large_page_addr.
+     */
+    vaddr large_page_addr;
+    vaddr large_page_mask;
+    /* host time (in ns) at the beginning of the time window */
+    int64_t window_begin_ns;
+    /* maximum number of entries observed in the window */
+    size_t window_max_entries;
+    size_t n_used_entries;
+    /* The next index to use in the tlb victim table.  */
+    size_t vindex;
+    /* The tlb victim table, in two parts.  */
+    CPUTLBEntry vtable[CPU_VTLB_SIZE];
+    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+    CPUTLBEntryFull *fulltlb;
+} CPUTLBDesc;
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+    /* Serialize updates to f.table and d.vtable, and others as noted. */
+    QemuSpin lock;
+    /*
+     * Within dirty, for each bit N, modifications have been made to
+     * mmu_idx N since the last time that mmu_idx was flushed.
+     * Protected by tlb_c.lock.
+     */
+    uint16_t dirty;
+    /*
+     * Statistics.  These are not lock protected, but are read and
+     * written atomically.  This allows the monitor to print a snapshot
+     * of the stats without interfering with the cpu.
+     */
+    size_t full_flush_count;
+    size_t part_flush_count;
+    size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+typedef struct CPUTLB {
+#ifdef CONFIG_TCG
+    CPUTLBCommon c;
+    CPUTLBDesc d[NB_MMU_MODES];
+    CPUTLBDescFast f[NB_MMU_MODES];
+#endif
+} CPUTLB;
+
 /*
  * Low 16 bits: number of cycles left, used only in icount mode.
  * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
@@ -212,6 +344,16 @@ typedef union IcountDecr {
     } u16;
 } IcountDecr;
 
+/*
+ * Elements of CPUState most efficiently accessed from CPUArchState,
+ * via small negative offsets.
+ */
+typedef struct CPUNegativeOffsetState {
+    CPUTLB tlb;
+    IcountDecr icount_decr;
+    bool can_do_io;
+} CPUNegativeOffsetState;
+
 typedef struct CPUBreakpoint {
     vaddr pc;
     int flags; /* BP_* */
@@ -279,16 +421,12 @@ struct qemu_work_item;
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
- * @can_do_io: Nonzero if memory-mapped IO is safe.  Deterministic execution
- *     requires that IO only be performed on the last instruction of a TB
- *     so that interrupts take effect immediately.
+ * @neg.can_do_io: True if memory-mapped IO is allowed.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *            AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 *      only have a single AddressSpace
- * @env_ptr: Pointer to subclass-specific CPUArchState field.
- * @icount_decr_ptr: Pointer to IcountDecr field within subclass.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
@@ -312,6 +450,9 @@ struct qemu_work_item;
 *    dirty ring structure.
 *
 * State of one CPU core or thread.
+ *
+ * Align, in order to match possible alignment required by CPUArchState,
+ * and eliminate a hole between CPUState and CPUArchState within ArchCPU.
 */
 struct CPUState {
     /*< private >*/
@@ -359,9 +500,6 @@ struct CPUState {
     AddressSpace *as;
     MemoryRegion *memory;
 
-    CPUArchState *env_ptr;
-    IcountDecr *icount_decr_ptr;
-
     CPUJumpCache *tb_jmp_cache;
 
     struct GDBRegisterState *gdb_regs;
@@ -405,7 +543,6 @@ struct CPUState {
     int cluster_index;
     uint32_t tcg_cflags;
     uint32_t halted;
-    uint32_t can_do_io;
     int32_t exception_index;
 
     AccelCPUState *accel;
@@ -430,8 +567,24 @@ struct CPUState {
     /* track IOMMUs whose translations we've cached in the TCG TLB */
     GArray *iommu_notifiers;
+
+    /*
+     * MUST BE LAST in order to minimize the displacement to CPUArchState.
+     */
+    char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
+    CPUNegativeOffsetState neg;
 };
 
+/* Validate placement of CPUNegativeOffsetState. */
+QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
+                  sizeof(CPUState) - sizeof(CPUNegativeOffsetState));
+
+static inline CPUArchState *cpu_env(CPUState *cpu)
+{
+    /* We validate that CPUArchState follows CPUState in cpu-all.h. */
+    return (CPUArchState *)(cpu + 1);
+}
+
 typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
 extern CPUTailQ cpus;
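The `-sizeof(CPUNegativeOffsetState) % 16` expression is modular arithmetic on an unsigned sizeof: it yields exactly the padding needed for `neg` to end on a 16-byte boundary, so the CPUArchState that follows starts without a hole. A compilable illustration with a stand-in struct (when the size is already a multiple of 16, the array length is zero, relying on the GNU zero-length-array extension):

    #include <stdio.h>

    struct S { char payload[44]; };     /* stand-in; any size works */

    struct Holder {
        /* (-44 % 16) in unsigned arithmetic is 4, so 44 + 4 = 48. */
        char pad[-sizeof(struct S) % 16] __attribute__((aligned(16)));
        struct S s;                     /* ends exactly on a 16-byte edge */
    };

    int main(void)
    {
        printf("sizeof(struct Holder) = %zu\n", sizeof(struct Holder));
        return 0;   /* prints 48, a multiple of 16 */
    }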


@@ -43,6 +43,8 @@ typedef struct AccelClass {
     bool (*has_memory)(MachineState *ms, AddressSpace *as,
                        hwaddr start_addr, hwaddr size);
 #endif
+    bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
+    void (*cpu_common_unrealize)(CPUState *cpu);
 
     /* gdbstub related hooks */
     int (*gdbstub_supported_sstep_flags)(void);
@@ -90,11 +92,17 @@ void accel_setup_post(MachineState *ms);
 void accel_cpu_instance_init(CPUState *cpu);
 
 /**
- * accel_cpu_realizefn:
+ * accel_cpu_common_realize:
  * @cpu: The CPU that needs to call accel-specific cpu realization.
 * @errp: currently unused.
 */
-bool accel_cpu_realizefn(CPUState *cpu, Error **errp);
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp);
+
+/**
+ * accel_cpu_common_unrealize:
+ * @cpu: The CPU that needs to call accel-specific cpu unrealization.
+ */
+void accel_cpu_common_unrealize(CPUState *cpu);
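For comparison, this is how an accelerator is expected to populate the new hooks in its class_init; a hedged sketch mirroring the TCG wiring at the top of this diff (the my_accel_* names are placeholders):

    static void my_accel_class_init(ObjectClass *oc, void *data)
    {
        AccelClass *ac = ACCEL_CLASS(oc);

        ac->name = "myaccel";
        ac->init_machine = my_accel_init_machine;
        /* invoked per vCPU from cpu_exec_realizefn()/unrealizefn() */
        ac->cpu_common_realize = my_accel_cpu_realize;
        ac->cpu_common_unrealize = my_accel_cpu_unrealize;
    }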

include/tcg/startup.h (new file, 58 lines)

@ -0,0 +1,58 @@
/*
* Tiny Code Generator for QEMU: definitions used by runtime startup
*
* Copyright (c) 2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef TCG_STARTUP_H
#define TCG_STARTUP_H
/**
* tcg_init: Initialize the TCG runtime
* @tb_size: translation buffer size
* @splitwx: use separate rw and rx mappings
* @max_cpus: number of vcpus in system mode
*
* Allocate and initialize TCG resources, especially the JIT buffer.
* In user-only mode, @max_cpus is unused.
*/
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
/**
* tcg_register_thread: Register this thread with the TCG runtime
*
* All TCG threads except the parent (i.e. the one that called the TCG
* accelerator's init_machine() method) must register with this
* function before initiating translation.
*/
void tcg_register_thread(void);
/**
* tcg_prologue_init(): Generate the code for the TCG prologue
*
* In softmmu this is done automatically as part of the TCG
* accelerator's init_machine() method, but for user-mode, the
* user-mode code must call this function after it has loaded
* the guest binary and the value of guest_base is known.
*/
void tcg_prologue_init(void);
#endif
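Taken together, the three entry points imply a fixed bring-up order for user-mode: tcg_init() early, tcg_prologue_init() only once guest_base is final, and tcg_register_thread() in every additional guest thread. A sketch under that assumption; the surrounding function names are placeholders:

```c
#include "tcg/startup.h"

/* Illustrative user-mode bring-up, following the documented ordering. */
static void user_mode_start(size_t tb_size)
{
    /* JIT buffer first; max_cpus is ignored in user-only mode. */
    tcg_init(tb_size, /* splitwx */ 0, /* max_cpus */ 0);

    /* ... load the guest binary so guest_base becomes final ... */

    /* Only now can the prologue bake in the real guest_base. */
    tcg_prologue_init();
}

/* Each additional guest thread registers itself before translating. */
static void *guest_thread_fn(void *arg)
{
    tcg_register_thread();
    /* ... enter the cpu loop ... */
    return NULL;
}
```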


@@ -489,7 +489,6 @@ struct TCGContext {
     TCGType addr_type;            /* TCG_TYPE_I32 or TCG_TYPE_I64 */

 #ifdef CONFIG_SOFTMMU
-    int tlb_fast_offset;
     int page_mask;
     uint8_t page_bits;
     uint8_t tlb_dyn_max_bits;
@@ -577,7 +576,7 @@ static inline bool temp_readonly(TCGTemp *ts)
 extern __thread TCGContext *tcg_ctx;
 extern const void *tcg_code_gen_epilogue;
 extern uintptr_t tcg_splitwx_diff;
-extern TCGv_env cpu_env;
+extern TCGv_env tcg_env;

 bool in_code_gen_buffer(const void *p);
@@ -783,9 +782,6 @@ static inline void *tcg_malloc(int size)
     }
 }

-void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
-void tcg_register_thread(void);
-void tcg_prologue_init(TCGContext *s);
 void tcg_func_start(TCGContext *s);

 int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start);
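Translator code now refers to the CPU state pointer as tcg_env rather than cpu_env. A minimal sketch of registering a TCG global against it; CPUFooState and its pc field are hypothetical, not a real target:

```c
/* Sketch: registering a translator global against the renamed tcg_env. */
static TCGv_i64 cpu_pc;

void foo_translate_init(void)
{
    cpu_pc = tcg_global_mem_new_i64(tcg_env,
                                    offsetof(CPUFooState, pc), "pc");
}
```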


@@ -593,7 +593,7 @@ const char *elf_hwcap2_str(uint32_t bit)

 static const char *get_elf_platform(void)
 {
-    CPUARMState *env = thread_cpu->env_ptr;
+    CPUARMState *env = cpu_env(thread_cpu);

 #if TARGET_BIG_ENDIAN
 # define END  "b"
@@ -4430,7 +4430,7 @@ static int fill_note_info(struct elf_note_info *info,
         if (cpu == thread_cpu) {
             continue;
         }
-        fill_thread_info(info, cpu->env_ptr);
+        fill_thread_info(info, cpu_env(cpu));
     }
 }


@@ -22,9 +22,6 @@
 #include "qemu.h"
 #include "user-internals.h"
 #include "qemu/plugin.h"
-#ifdef CONFIG_GPROF
-#include <sys/gmon.h>
-#endif

 #ifdef CONFIG_GCOV
 extern void __gcov_dump(void);
@@ -32,9 +29,6 @@ extern void __gcov_dump(void);

 void preexit_cleanup(CPUArchState *env, int code)
 {
-#ifdef CONFIG_GPROF
-    _mcleanup();
-#endif
 #ifdef CONFIG_GCOV
     __gcov_dump();
 #endif


@@ -25,7 +25,7 @@
 struct target_sigcontext {
     abi_ulong sc_flags;
     abi_ulong sc_gr[32];
-    uint64_t sc_fr[32];
+    abi_ullong sc_fr[32];
     abi_ulong sc_iasq[2];
     abi_ulong sc_iaoq[2];
     abi_ulong sc_sar;


@@ -323,7 +323,7 @@ void cpu_loop(CPUX86State *env)

 static void target_cpu_free(void *obj)
 {
-    CPUArchState *env = ((CPUState *)obj)->env_ptr;
+    CPUArchState *env = cpu_env(obj);

     target_munmap(env->gdt.base, sizeof(uint64_t) * TARGET_GDT_ENTRIES);
     g_free(obj);
 }


@@ -41,7 +41,7 @@
 #include "exec/exec-all.h"
 #include "exec/gdbstub.h"
 #include "gdbstub/user.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
 #include "qemu/timer.h"
 #include "qemu/envlist.h"
 #include "qemu/guest-random.h"
@@ -229,7 +229,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
 {
     CPUState *cpu = env_cpu(env);
     CPUState *new_cpu = cpu_create(cpu_type);
-    CPUArchState *new_env = new_cpu->env_ptr;
+    CPUArchState *new_env = cpu_env(new_cpu);
     CPUBreakpoint *bp;

     /* Reset non arch specific state */
@@ -794,7 +794,7 @@ int main(int argc, char **argv, char **envp)
         ac->init_machine(NULL);
     }
     cpu = cpu_create(cpu_type);
-    env = cpu->env_ptr;
+    env = cpu_env(cpu);
     cpu_reset(cpu);
     thread_cpu = cpu;
@@ -994,7 +994,7 @@ int main(int argc, char **argv, char **envp)
     /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
        generating the prologue until now so that the prologue can take
        the real value of GUEST_BASE into account.  */
-    tcg_prologue_init(tcg_ctx);
+    tcg_prologue_init();

     target_cpu_copy_regs(env, regs);


@@ -588,11 +588,6 @@ void signal_init(void)
     act.sa_flags = SA_SIGINFO;
     act.sa_sigaction = host_signal_handler;
     for(i = 1; i <= TARGET_NSIG; i++) {
-#ifdef CONFIG_GPROF
-        if (i == TARGET_SIGPROF) {
-            continue;
-        }
-#endif
         host_sig = target_to_host_signal(i);
         sigaction(host_sig, NULL, &oact);
         if (oact.sa_sigaction == (void *)SIG_IGN) {
@@ -618,7 +613,7 @@ void signal_init(void)
 void force_sig(int sig)
 {
     CPUState *cpu = thread_cpu;
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     target_siginfo_t info = {};

     info.si_signo = sig;
@@ -636,7 +631,7 @@ void force_sig(int sig)
 void force_sig_fault(int sig, int code, abi_ulong addr)
 {
     CPUState *cpu = thread_cpu;
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     target_siginfo_t info = {};

     info.si_signo = sig;
@@ -695,10 +690,9 @@ void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,

 /* abort execution with signal */
 static G_NORETURN
-void dump_core_and_abort(CPUArchState *cpu_env, int target_sig)
+void dump_core_and_abort(CPUArchState *env, int target_sig)
 {
-    CPUState *cpu = thread_cpu;
-    CPUArchState *env = cpu->env_ptr;
+    CPUState *cpu = env_cpu(env);
     TaskState *ts = (TaskState *)cpu->opaque;
     int host_sig, core_dumped = 0;
     struct sigaction act;
@@ -724,7 +718,7 @@ void dump_core_and_abort(CPUArchState *cpu_env, int target_sig)
                 target_sig, strsignal(host_sig), "core dumped" );
     }

-    preexit_cleanup(cpu_env, 128 + target_sig);
+    preexit_cleanup(env, 128 + target_sig);

     /* The proper exit code for dying from an uncaught signal is
      * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
@@ -783,8 +777,8 @@ static inline void rewind_if_in_safe_syscall(void *puc)

 static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
 {
-    CPUArchState *env = thread_cpu->env_ptr;
-    CPUState *cpu = env_cpu(env);
+    CPUState *cpu = thread_cpu;
+    CPUArchState *env = cpu_env(cpu);
     TaskState *ts = cpu->opaque;
     target_siginfo_t tinfo;
     host_sigcontext *uc = puc;


@@ -23,6 +23,7 @@
 #include "qemu/memfd.h"
 #include "qemu/queue.h"
 #include "qemu/plugin.h"
+#include "tcg/startup.h"
 #include "target_mman.h"
 #include <elf.h>
 #include <endian.h>
@@ -141,7 +142,6 @@
 #include "special-errno.h"
 #include "qapi/error.h"
 #include "fd-trans.h"
-#include "tcg/tcg.h"
 #include "cpu_loop-common.h"

 #ifndef CLONE_IO


@@ -254,11 +254,6 @@ if host_arch == 'i386' and not cc.links('''
   qemu_common_flags = ['-march=i486'] + qemu_common_flags
 endif

-if get_option('gprof')
-  qemu_common_flags += ['-p']
-  qemu_ldflags += ['-p']
-endif
-
 if get_option('prefer_static')
   qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static'
 endif
@@ -2204,7 +2199,6 @@ config_host_data.set('CONFIG_DEBUG_GRAPH_LOCK', get_option('debug_graph_lock'))
 config_host_data.set('CONFIG_DEBUG_MUTEX', get_option('debug_mutex'))
 config_host_data.set('CONFIG_DEBUG_STACK_USAGE', get_option('debug_stack_usage'))
 config_host_data.set('CONFIG_DEBUG_TCG', get_option('debug_tcg'))
-config_host_data.set('CONFIG_GPROF', get_option('gprof'))
 config_host_data.set('CONFIG_LIVE_BLOCK_MIGRATION', get_option('live_block_migration').allowed())
 config_host_data.set('CONFIG_QOM_CAST_DEBUG', get_option('qom_cast_debug'))
 config_host_data.set('CONFIG_REPLICATION', get_option('replication').allowed())
@@ -3421,8 +3415,8 @@ if have_block
 system_ss.add(when: 'CONFIG_WIN32', if_true: [files('os-win32.c')])
 endif

-common_ss.add(files('cpus-common.c'))
-specific_ss.add(files('cpu.c'))
+common_ss.add(files('cpu-common.c'))
+specific_ss.add(files('cpu-target.c'))

 subdir('softmmu')
@@ -3444,7 +3438,7 @@ if get_option('b_lto')
   pagevary = declare_dependency(link_with: pagevary)
 endif
 common_ss.add(pagevary)
-specific_ss.add(files('page-vary.c'))
+specific_ss.add(files('page-vary-target.c'))

 subdir('backends')
 subdir('disas')
@@ -4118,12 +4112,6 @@ summary_info += {'memory allocator': get_option('malloc')}
 summary_info += {'avx2 optimization': config_host_data.get('CONFIG_AVX2_OPT')}
 summary_info += {'avx512bw optimization': config_host_data.get('CONFIG_AVX512BW_OPT')}
 summary_info += {'avx512f optimization': config_host_data.get('CONFIG_AVX512F_OPT')}
-if get_option('gprof')
-  gprof_info = 'YES (deprecated)'
-else
-  gprof_info = get_option('gprof')
-endif
-summary_info += {'gprof': gprof_info}
 summary_info += {'gcov': get_option('b_coverage')}
 summary_info += {'thread sanitizer': get_option('tsan')}
 summary_info += {'CFI support': get_option('cfi')}


@@ -348,9 +348,6 @@ option('debug_stack_usage', type: 'boolean', value: false,
        description: 'measure coroutine stack usage')
 option('qom_cast_debug', type: 'boolean', value: true,
        description: 'cast debugging support')
-option('gprof', type: 'boolean', value: false,
-       description: 'QEMU profiling with gprof',
-       deprecated: true)
 option('slirp_smbd', type : 'feature', value : 'auto',
        description: 'use smbd (at path --smbd=*) in slirp networking')


@@ -81,7 +81,7 @@ CPUArchState *mon_get_cpu_env(Monitor *mon)
 {
     CPUState *cs = mon_get_cpu(mon);

-    return cs ? cs->env_ptr : NULL;
+    return cs ? cpu_env(cs) : NULL;
 }

 int monitor_get_cpu_index(Monitor *mon)


@@ -220,6 +220,19 @@ static size_t type_object_get_size(TypeImpl *ti)
     return 0;
 }

+static size_t type_object_get_align(TypeImpl *ti)
+{
+    if (ti->instance_align) {
+        return ti->instance_align;
+    }
+
+    if (type_has_parent(ti)) {
+        return type_object_get_align(type_get_parent(ti));
+    }
+
+    return 0;
+}
+
 size_t object_type_get_instance_size(const char *typename)
 {
     TypeImpl *type = type_get_by_name(typename);
@@ -293,6 +306,7 @@ static void type_initialize(TypeImpl *ti)

     ti->class_size = type_class_get_size(ti);
     ti->instance_size = type_object_get_size(ti);
+    ti->instance_align = type_object_get_align(ti);
     /* Any type with zero instance_size is implicitly abstract.
      * This means interface types are all abstract.
      */
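type_initialize() now resolves instance_align by walking up the parent chain, so alignment only needs to be declared once per hierarchy. A sketch of a type that would rely on this; MyDeviceState and the type name are made up:

```c
/* Sketch: a QOM type declaring its required instance alignment. */
typedef struct MyDeviceState {
    DeviceState parent_obj;
    uint8_t buf[64] QEMU_ALIGNED(64);   /* forces a 64-byte requirement */
} MyDeviceState;

static const TypeInfo my_device_info = {
    .name = "x-my-device",
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(MyDeviceState),
    .instance_align = __alignof(MyDeviceState),
    /* Child types that leave instance_align at 0 now inherit this value. */
};
```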


@@ -34,7 +34,6 @@ meson_options_help() {
   printf "%s\n" '                           (choices: auto/disabled/enabled/internal/system)'
   printf "%s\n" '  --enable-fuzzing         build fuzzing targets'
   printf "%s\n" '  --enable-gcov            Enable coverage tracking.'
-  printf "%s\n" '  --enable-gprof           QEMU profiling with gprof'
   printf "%s\n" '  --enable-lto             Use link time optimization'
   printf "%s\n" '  --enable-malloc=CHOICE   choose memory allocator to use [system] (choices:'
   printf "%s\n" '                           jemalloc/system/tcmalloc)'
@@ -309,8 +308,6 @@ _meson_option_parse() {
     --disable-glusterfs) printf "%s" -Dglusterfs=disabled ;;
     --enable-gnutls) printf "%s" -Dgnutls=enabled ;;
     --disable-gnutls) printf "%s" -Dgnutls=disabled ;;
-    --enable-gprof) printf "%s" -Dgprof=true ;;
-    --disable-gprof) printf "%s" -Dgprof=false ;;
     --enable-gtk) printf "%s" -Dgtk=enabled ;;
     --disable-gtk) printf "%s" -Dgtk=disabled ;;
     --enable-gtk-clipboard) printf "%s" -Dgtk_clipboard=enabled ;;


@@ -251,7 +251,7 @@ static void common_semi_dead_cb(CPUState *cs, uint64_t ret, int err)
 static void common_semi_rw_cb(CPUState *cs, uint64_t ret, int err)
 {
     /* Recover the original length from the third argument. */
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     target_ulong args = common_semi_arg(cs, 1);
     target_ulong arg2;
     GET_ARG(2);
@@ -322,7 +322,7 @@ static void
 common_semi_readc_cb(CPUState *cs, uint64_t ret, int err)
 {
     if (!err) {
-        CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+        CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
         uint8_t ch;

         if (get_user_u8(ch, common_semi_stack_bottom(cs) - 1)) {
@@ -361,7 +361,7 @@ static const uint8_t featurefile_data[] = {
  */
 void do_common_semihosting(CPUState *cs)
 {
-    CPUArchState *env = cs->env_ptr;
+    CPUArchState *env = cpu_env(cs);
     target_ulong args;
     target_ulong arg0, arg1, arg2, arg3;
     target_ulong ul_ret;


@@ -24,7 +24,7 @@
 static int validate_strlen(CPUState *cs, target_ulong str, target_ulong tlen)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char c;

     if (tlen == 0) {
@@ -54,7 +54,7 @@ static int validate_lock_user_string(char **pstr, CPUState *cs,
                                      target_ulong tstr, target_ulong tlen)
 {
     int ret = validate_strlen(cs, tstr, tlen);
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *str = NULL;

     if (ret > 0) {
@@ -74,7 +74,7 @@ static int validate_lock_user_string(char **pstr, CPUState *cs,
 static int copy_stat_to_user(CPUState *cs, target_ulong addr,
                              const struct stat *s)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     struct gdb_stat *p;

     if (s->st_dev != (uint32_t)s->st_dev ||
@@ -258,7 +258,7 @@ static void host_open(CPUState *cs, gdb_syscall_complete_cb complete,
                       target_ulong fname, target_ulong fname_len,
                       int gdb_flags, int mode)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *p;
     int ret, host_flags = O_BINARY;
@@ -316,7 +316,7 @@ static void host_close(CPUState *cs, gdb_syscall_complete_cb complete,
 static void host_read(CPUState *cs, gdb_syscall_complete_cb complete,
                       GuestFD *gf, target_ulong buf, target_ulong len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     void *ptr = lock_user(VERIFY_WRITE, buf, len, 0);
     ssize_t ret;
@@ -337,7 +337,7 @@ static void host_read(CPUState *cs, gdb_syscall_complete_cb complete,
 static void host_write(CPUState *cs, gdb_syscall_complete_cb complete,
                        GuestFD *gf, target_ulong buf, target_ulong len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     void *ptr = lock_user(VERIFY_READ, buf, len, 1);
     ssize_t ret;
@@ -411,7 +411,7 @@ static void host_stat(CPUState *cs, gdb_syscall_complete_cb complete,
                       target_ulong fname, target_ulong fname_len,
                       target_ulong addr)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     struct stat buf;
     char *name;
     int ret, err;
@@ -440,7 +440,7 @@ static void host_stat(CPUState *cs, gdb_syscall_complete_cb complete,
 static void host_remove(CPUState *cs, gdb_syscall_complete_cb complete,
                         target_ulong fname, target_ulong fname_len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *p;
     int ret;
@@ -459,7 +459,7 @@ static void host_rename(CPUState *cs, gdb_syscall_complete_cb complete,
                         target_ulong oname, target_ulong oname_len,
                         target_ulong nname, target_ulong nname_len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *ostr, *nstr;
     int ret;
@@ -484,7 +484,7 @@ static void host_rename(CPUState *cs, gdb_syscall_complete_cb complete,
 static void host_system(CPUState *cs, gdb_syscall_complete_cb complete,
                         target_ulong cmd, target_ulong cmd_len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *p;
     int ret;
@@ -502,7 +502,7 @@ static void host_system(CPUState *cs, gdb_syscall_complete_cb complete,
 static void host_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete,
                               target_ulong tv_addr, target_ulong tz_addr)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     struct gdb_timeval *p;
     int64_t rt;
@@ -547,7 +547,7 @@ static void host_poll_one(CPUState *cs, gdb_syscall_complete_cb complete,
 static void staticfile_read(CPUState *cs, gdb_syscall_complete_cb complete,
                             GuestFD *gf, target_ulong buf, target_ulong len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     target_ulong rest = gf->staticfile.len - gf->staticfile.off;
     void *ptr;
@@ -605,7 +605,7 @@ static void staticfile_flen(CPUState *cs, gdb_syscall_complete_cb complete,
 static void console_read(CPUState *cs, gdb_syscall_complete_cb complete,
                          GuestFD *gf, target_ulong buf, target_ulong len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *ptr;
     int ret;
@@ -622,7 +622,7 @@ static void console_read(CPUState *cs, gdb_syscall_complete_cb complete,
 static void console_write(CPUState *cs, gdb_syscall_complete_cb complete,
                           GuestFD *gf, target_ulong buf, target_ulong len)
 {
-    CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+    CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
     char *ptr = lock_user(VERIFY_READ, buf, len, 1);
     int ret;


@@ -6,10 +6,6 @@ specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_true: [files(
   'watchpoint.c',
 )])

-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: [files(
-  'icount.c',
-)])
-
 system_ss.add(files(
   'balloon.c',
   'bootdevice.c',


@@ -177,7 +177,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
              * Force recompile to succeed, because icount may
              * be read only at the end of the block.
              */
-            if (!cpu->can_do_io) {
+            if (!cpu->neg.can_do_io) {
                 /* Force execution of one insn next time.  */
                 cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
                                     | curr_cflags(cpu);


@@ -209,8 +209,6 @@ static void alpha_cpu_initfn(Object *obj)
     AlphaCPU *cpu = ALPHA_CPU(obj);
     CPUAlphaState *env = &cpu->env;

-    cpu_set_cpustate_pointers(cpu);
-
     env->lock_addr = -1;
 #if defined(CONFIG_USER_ONLY)
     env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
@@ -286,6 +284,7 @@ static const TypeInfo alpha_cpu_type_infos[] = {
         .name = TYPE_ALPHA_CPU,
         .parent = TYPE_CPU,
         .instance_size = sizeof(AlphaCPU),
+        .instance_align = __alignof(AlphaCPU),
         .instance_init = alpha_cpu_initfn,
         .abstract = true,
         .class_size = sizeof(AlphaCPUClass),


@@ -263,7 +263,6 @@ struct ArchCPU {
     CPUState parent_obj;
     /*< public >*/

-    CPUNegativeOffsetState neg;
     CPUAlphaState env;

     /* This alarm doesn't exist in real hardware; we wish it did.  */
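With CPUNegativeOffsetState folded into CPUState, each target's ArchCPU shrinks to parent_obj plus env, and fields such as can_do_io move under cpu->neg. A sketch of the resulting access pattern, not the verbatim QEMU definitions:

```c
/* Sketch: 'neg' now lives inside CPUState itself, and the per-target
 * env follows immediately, which is what cpu_env() relies on. */
static bool cpu_in_io_window(CPUState *cpu)
{
    return cpu->neg.can_do_io;   /* was: cpu->can_do_io */
}
```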


@@ -131,13 +131,13 @@ void alpha_translate_init(void)
     int i;

     for (i = 0; i < 31; i++) {
-        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
+        cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
                                                offsetof(CPUAlphaState, ir[i]),
                                                greg_names[i]);
     }

     for (i = 0; i < 31; i++) {
-        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
+        cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
                                             offsetof(CPUAlphaState, fir[i]),
                                             freg_names[i]);
     }
@@ -146,7 +146,7 @@ void alpha_translate_init(void)
     memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
     for (i = 0; i < 8; i++) {
         int r = (i == 7 ? 25 : i + 8);
-        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
+        cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
                                                offsetof(CPUAlphaState,
                                                         shadow[i]),
                                                shadow_names[i]);
@@ -155,7 +155,7 @@ void alpha_translate_init(void)
     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
         const GlobalVar *v = &vars[i];
-        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
+        *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
     }
 }
@@ -244,12 +244,12 @@ static int get_flag_ofs(unsigned shift)
 static void ld_flag_byte(TCGv val, unsigned shift)
 {
-    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
+    tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
 }

 static void st_flag_byte(TCGv val, unsigned shift)
 {
-    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
+    tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
 }

 static void gen_excp_1(int exception, int error_code)
@@ -258,7 +258,7 @@ static void gen_excp_1(int exception, int error_code)
     tmp1 = tcg_constant_i32(exception);
     tmp2 = tcg_constant_i32(error_code);
-    gen_helper_excp(cpu_env, tmp1, tmp2);
+    gen_helper_excp(tcg_env, tmp1, tmp2);
 }

 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
@@ -582,7 +582,7 @@ static void gen_qual_roundmode(DisasContext *ctx, int fn11)
         tcg_gen_movi_i32(tmp, float_round_down);
         break;
     case QUAL_RM_D:
-        tcg_gen_ld8u_i32(tmp, cpu_env,
+        tcg_gen_ld8u_i32(tmp, tcg_env,
                          offsetof(CPUAlphaState, fpcr_dyn_round));
         break;
     }
@@ -591,7 +591,7 @@ static void gen_qual_roundmode(DisasContext *ctx, int fn11)
     /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
        With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
        sets the one field.  */
-    tcg_gen_st8_i32(tmp, cpu_env,
+    tcg_gen_st8_i32(tmp, tcg_env,
                     offsetof(CPUAlphaState, fp_status.float_rounding_mode));
 #else
     gen_helper_setroundmode(tmp);
@@ -611,7 +611,7 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11)
     tmp = tcg_temp_new_i32();
     if (fn11) {
         /* Underflow is enabled, use the FPCR setting.  */
-        tcg_gen_ld8u_i32(tmp, cpu_env,
+        tcg_gen_ld8u_i32(tmp, tcg_env,
                          offsetof(CPUAlphaState, fpcr_flush_to_zero));
     } else {
         /* Underflow is disabled, force flush-to-zero.  */
@@ -619,7 +619,7 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11)
     }
 #if defined(CONFIG_SOFTFLOAT_INLINE)
-    tcg_gen_st8_i32(tmp, cpu_env,
+    tcg_gen_st8_i32(tmp, tcg_env,
                     offsetof(CPUAlphaState, fp_status.flush_to_zero));
 #else
     gen_helper_setflushzero(tmp);
@@ -636,16 +636,16 @@ static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
         val = cpu_fir[reg];
         if ((fn11 & QUAL_S) == 0) {
             if (is_cmp) {
-                gen_helper_ieee_input_cmp(cpu_env, val);
+                gen_helper_ieee_input_cmp(tcg_env, val);
             } else {
-                gen_helper_ieee_input(cpu_env, val);
+                gen_helper_ieee_input(tcg_env, val);
             }
         } else {
 #ifndef CONFIG_USER_ONLY
             /* In system mode, raise exceptions for denormals like real
                hardware.  In user mode, proceed as if the OS completion
                handler is handling the denormal as per spec.  */
-            gen_helper_ieee_input_s(cpu_env, val);
+            gen_helper_ieee_input_s(tcg_env, val);
 #endif
         }
     }
@@ -678,9 +678,9 @@ static void gen_fp_exc_raise(int rc, int fn11)
        or if we were to do something clever with imprecise exceptions.  */
     reg = tcg_constant_i32(rc + 32);
     if (fn11 & QUAL_S) {
-        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
+        gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
     } else {
-        gen_helper_fp_exc_raise(cpu_env, ign, reg);
+        gen_helper_fp_exc_raise(tcg_env, ign, reg);
     }
 }
@@ -705,7 +705,7 @@ static void gen_ieee_arith2(DisasContext *ctx,
     gen_qual_flushzero(ctx, fn11);

     vb = gen_ieee_input(ctx, rb, fn11, 0);
-    helper(dest_fpr(ctx, rc), cpu_env, vb);
+    helper(dest_fpr(ctx, rc), tcg_env, vb);

     gen_fp_exc_raise(rc, fn11);
 }
@@ -732,10 +732,10 @@ static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
     /* Almost all integer conversions use cropped rounding;
        special case that.  */
     if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
-        gen_helper_cvttq_c(vc, cpu_env, vb);
+        gen_helper_cvttq_c(vc, tcg_env, vb);
     } else {
         gen_qual_roundmode(ctx, fn11);
-        gen_helper_cvttq(vc, cpu_env, vb);
+        gen_helper_cvttq(vc, tcg_env, vb);
     }
     gen_fp_exc_raise(rc, fn11);
 }
@@ -754,10 +754,10 @@ static void gen_ieee_intcvt(DisasContext *ctx,
        is inexact.  Thus we only need to worry about exceptions when
        inexact handling is requested.  */
     if (fn11 & QUAL_I) {
-        helper(vc, cpu_env, vb);
+        helper(vc, tcg_env, vb);
         gen_fp_exc_raise(rc, fn11);
     } else {
-        helper(vc, cpu_env, vb);
+        helper(vc, tcg_env, vb);
     }
 }
@@ -797,7 +797,7 @@ static void gen_ieee_arith3(DisasContext *ctx,
     va = gen_ieee_input(ctx, ra, fn11, 0);
     vb = gen_ieee_input(ctx, rb, fn11, 0);
     vc = dest_fpr(ctx, rc);
-    helper(vc, cpu_env, va, vb);
+    helper(vc, tcg_env, va, vb);

     gen_fp_exc_raise(rc, fn11);
 }
@@ -826,7 +826,7 @@ static void gen_ieee_compare(DisasContext *ctx,
     va = gen_ieee_input(ctx, ra, fn11, 1);
     vb = gen_ieee_input(ctx, rb, fn11, 1);
     vc = dest_fpr(ctx, rc);
-    helper(vc, cpu_env, va, vb);
+    helper(vc, tcg_env, va, vb);

     gen_fp_exc_raise(rc, fn11);
 }
@@ -1059,12 +1059,12 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
         break;
     case 0x9E:
         /* RDUNIQUE */
-        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+        tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                        offsetof(CPUAlphaState, unique));
         break;
     case 0x9F:
         /* WRUNIQUE */
-        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+        tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                        offsetof(CPUAlphaState, unique));
         break;
     default:
@@ -1088,17 +1088,17 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
         break;
     case 0x2D:
         /* WRVPTPTR */
-        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+        tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                        offsetof(CPUAlphaState, vptptr));
         break;
     case 0x31:
         /* WRVAL */
-        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+        tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                        offsetof(CPUAlphaState, sysval));
         break;
     case 0x32:
         /* RDVAL */
-        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+        tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                        offsetof(CPUAlphaState, sysval));
         break;
@@ -1126,23 +1126,23 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
     case 0x38:
         /* WRUSP */
-        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+        tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                        offsetof(CPUAlphaState, usp));
         break;
     case 0x3A:
         /* RDUSP */
-        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+        tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                        offsetof(CPUAlphaState, usp));
         break;
     case 0x3C:
         /* WHAMI */
-        tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
+        tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
             -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
         break;
     case 0x3E:
         /* WTINT */
-        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                        -offsetof(AlphaCPU, env) +
                        offsetof(CPUState, halted));
         tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
@@ -1174,7 +1174,7 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
         }
         tcg_gen_movi_i64(tmp, exc_addr);
-        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));

         entry += (palcode & 0x80
                   ? 0x2000 + (palcode - 0x80) * 64
@@ -1254,9 +1254,9 @@ static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
         if (data == 0) {
             tcg_gen_movi_i64(va, 0);
         } else if (data & PR_LONG) {
-            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
+            tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
         } else {
-            tcg_gen_ld_i64(va, cpu_env, data);
+            tcg_gen_ld_i64(va, tcg_env, data);
         }
         break;
     }
@@ -1272,17 +1272,17 @@ static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
     switch (regno) {
     case 255:
         /* TBIA */
-        gen_helper_tbia(cpu_env);
+        gen_helper_tbia(tcg_env);
         break;
     case 254:
         /* TBIS */
-        gen_helper_tbis(cpu_env, vb);
+        gen_helper_tbis(tcg_env, vb);
         break;
     case 253:
         /* WAIT */
-        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                        -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
         return gen_excp(ctx, EXCP_HALTED, 0);
@@ -1296,16 +1296,16 @@ static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
         if (translator_io_start(&ctx->base)) {
             ret = DISAS_PC_STALE;
         }
-        gen_helper_set_alarm(cpu_env, vb);
+        gen_helper_set_alarm(tcg_env, vb);
         break;

     case 7:
         /* PALBR */
-        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
+        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
         /* Changing the PAL base register implies un-chaining all of the TBs
            that ended with a CALL_PAL.  Since the base register usually only
            changes during boot, flushing everything works well.  */
-        gen_helper_tb_flush(cpu_env);
+        gen_helper_tb_flush(tcg_env);
         return DISAS_PC_STALE;

     case 32 ... 39:
@@ -1327,9 +1327,9 @@ static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
         data = cpu_pr_data(regno);
         if (data != 0) {
             if (data & PR_LONG) {
-                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
+                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
             } else {
-                tcg_gen_st_i64(vb, cpu_env, data);
+                tcg_gen_st_i64(vb, tcg_env, data);
             }
         }
         break;
@@ -1594,7 +1594,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             tcg_gen_ext32s_i64(vc, vb);
             tcg_gen_add_i64(tmp, tmp, vc);
             tcg_gen_ext32s_i64(vc, tmp);
-            gen_helper_check_overflow(cpu_env, vc, tmp);
+            gen_helper_check_overflow(tcg_env, vc, tmp);
             break;
         case 0x49:
             /* SUBL/V */
@@ -1603,7 +1603,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             tcg_gen_ext32s_i64(vc, vb);
             tcg_gen_sub_i64(tmp, tmp, vc);
             tcg_gen_ext32s_i64(vc, tmp);
-            gen_helper_check_overflow(cpu_env, vc, tmp);
+            gen_helper_check_overflow(tcg_env, vc, tmp);
             break;
         case 0x4D:
             /* CMPLT */
@@ -1620,7 +1620,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             tcg_gen_and_i64(tmp, tmp, tmp2);
             tcg_gen_shri_i64(tmp, tmp, 63);
             tcg_gen_movi_i64(tmp2, 0);
-            gen_helper_check_overflow(cpu_env, tmp, tmp2);
+            gen_helper_check_overflow(tcg_env, tmp, tmp2);
             break;
         case 0x69:
             /* SUBQ/V */
@@ -1633,7 +1633,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             tcg_gen_and_i64(tmp, tmp, tmp2);
             tcg_gen_shri_i64(tmp, tmp, 63);
             tcg_gen_movi_i64(tmp2, 0);
-            gen_helper_check_overflow(cpu_env, tmp, tmp2);
+            gen_helper_check_overflow(tcg_env, tmp, tmp2);
             break;
         case 0x6D:
             /* CMPLE */
@@ -1924,7 +1924,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             tcg_gen_ext32s_i64(vc, vb);
             tcg_gen_mul_i64(tmp, tmp, vc);
             tcg_gen_ext32s_i64(vc, tmp);
-            gen_helper_check_overflow(cpu_env, vc, tmp);
+            gen_helper_check_overflow(tcg_env, vc, tmp);
             break;
         case 0x60:
             /* MULQ/V */
@@ -1932,7 +1932,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             tmp2 = tcg_temp_new();
             tcg_gen_muls2_i64(vc, tmp, va, vb);
             tcg_gen_sari_i64(tmp2, vc, 63);
-            gen_helper_check_overflow(cpu_env, tmp, tmp2);
+            gen_helper_check_overflow(tcg_env, tmp, tmp2);
             break;
         default:
             goto invalid_opc;
@@ -1957,7 +1957,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             REQUIRE_REG_31(ra);
             REQUIRE_FEN;
             vb = load_fpr(ctx, rb);
-            gen_helper_sqrtf(vc, cpu_env, vb);
+            gen_helper_sqrtf(vc, tcg_env, vb);
             break;
         case 0x0B:
             /* SQRTS */
@@ -1986,7 +1986,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             REQUIRE_REG_31(ra);
             REQUIRE_FEN;
             vb = load_fpr(ctx, rb);
-            gen_helper_sqrtg(vc, cpu_env, vb);
+            gen_helper_sqrtg(vc, tcg_env, vb);
             break;
         case 0x02B:
             /* SQRTT */
@@ -2009,22 +2009,22 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         case 0x00:
             /* ADDF */
             REQUIRE_FEN;
-            gen_helper_addf(vc, cpu_env, va, vb);
+            gen_helper_addf(vc, tcg_env, va, vb);
             break;
         case 0x01:
             /* SUBF */
             REQUIRE_FEN;
-            gen_helper_subf(vc, cpu_env, va, vb);
+            gen_helper_subf(vc, tcg_env, va, vb);
             break;
         case 0x02:
             /* MULF */
             REQUIRE_FEN;
-            gen_helper_mulf(vc, cpu_env, va, vb);
+            gen_helper_mulf(vc, tcg_env, va, vb);
             break;
         case 0x03:
             /* DIVF */
             REQUIRE_FEN;
-            gen_helper_divf(vc, cpu_env, va, vb);
+            gen_helper_divf(vc, tcg_env, va, vb);
             break;
         case 0x1E:
             /* CVTDG -- TODO */
@@ -2033,43 +2033,43 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         case 0x20:
             /* ADDG */
             REQUIRE_FEN;
-            gen_helper_addg(vc, cpu_env, va, vb);
+            gen_helper_addg(vc, tcg_env, va, vb);
             break;
         case 0x21:
             /* SUBG */
             REQUIRE_FEN;
-            gen_helper_subg(vc, cpu_env, va, vb);
+            gen_helper_subg(vc, tcg_env, va, vb);
             break;
         case 0x22:
             /* MULG */
             REQUIRE_FEN;
-            gen_helper_mulg(vc, cpu_env, va, vb);
+            gen_helper_mulg(vc, tcg_env, va, vb);
             break;
         case 0x23:
             /* DIVG */
             REQUIRE_FEN;
-            gen_helper_divg(vc, cpu_env, va, vb);
+            gen_helper_divg(vc, tcg_env, va, vb);
             break;
         case 0x25:
             /* CMPGEQ */
             REQUIRE_FEN;
-            gen_helper_cmpgeq(vc, cpu_env, va, vb);
+            gen_helper_cmpgeq(vc, tcg_env, va, vb);
             break;
         case 0x26:
             /* CMPGLT */
             REQUIRE_FEN;
-            gen_helper_cmpglt(vc, cpu_env, va, vb);
+            gen_helper_cmpglt(vc, tcg_env, va, vb);
             break;
         case 0x27:
             /* CMPGLE */
             REQUIRE_FEN;
-            gen_helper_cmpgle(vc, cpu_env, va, vb);
+            gen_helper_cmpgle(vc, tcg_env, va, vb);
             break;
         case 0x2C:
             /* CVTGF */
             REQUIRE_REG_31(ra);
             REQUIRE_FEN;
-            gen_helper_cvtgf(vc, cpu_env, vb);
+            gen_helper_cvtgf(vc, tcg_env, vb);
             break;
         case 0x2D:
             /* CVTGD -- TODO */
@@ -2079,19 +2079,19 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             /* CVTGQ */
             REQUIRE_REG_31(ra);
             REQUIRE_FEN;
-            gen_helper_cvtgq(vc, cpu_env, vb);
+            gen_helper_cvtgq(vc, tcg_env, vb);
             break;
         case 0x3C:
             /* CVTQF */
             REQUIRE_REG_31(ra);
             REQUIRE_FEN;
-            gen_helper_cvtqf(vc, cpu_env, vb);
+            gen_helper_cvtqf(vc, tcg_env, vb);
             break;
         case 0x3E:
             /* CVTQG */
             REQUIRE_REG_31(ra);
             REQUIRE_FEN;
-            gen_helper_cvtqg(vc, cpu_env, vb);
+            gen_helper_cvtqg(vc, tcg_env, vb);
             break;
         default:
             goto invalid_opc;
@@ -2242,7 +2242,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             /* MT_FPCR */
             REQUIRE_FEN;
             va = load_fpr(ctx, ra);
-            gen_helper_store_fpcr(cpu_env, va);
+            gen_helper_store_fpcr(tcg_env, va);
             if (ctx->tb_rm == QUAL_RM_D) {
                 /* Re-do the copy of the rounding mode to fp_status
                    the next time we use dynamic rounding.  */
@@ -2253,7 +2253,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             /* MF_FPCR */
             REQUIRE_FEN;
             va = dest_fpr(ctx, ra);
-            gen_helper_load_fpcr(va, cpu_env);
+            gen_helper_load_fpcr(va, tcg_env);
             break;
         case 0x02A:
             /* FCMOVEQ */
@@ -2292,7 +2292,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             REQUIRE_FEN;
             vc = dest_fpr(ctx, rc);
             vb = load_fpr(ctx, rb);
-            gen_helper_cvtql(vc, cpu_env, vb);
+            gen_helper_cvtql(vc, tcg_env, vb);
             gen_fp_exc_raise(rc, fn11);
             break;
         default:
@@ -2332,7 +2332,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
             if (translator_io_start(&ctx->base)) {
                 ret = DISAS_PC_STALE;
             }
-            gen_helper_load_pcc(va, cpu_env);
+            gen_helper_load_pcc(va, tcg_env);
             break;
         case 0xE000:
             /* RC */
@@ -2628,7 +2628,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
                address from EXC_ADDR.  This turns out to be useful for our
                emulation PALcode, so continue to accept it.  */
             vb = dest_sink(ctx);
-            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
+            tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
         } else {
             vb = load_gpr(ctx, rb);
         }
@@ -2871,7 +2871,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
-    CPUAlphaState *env = cpu->env_ptr;
+    CPUAlphaState *env = cpu_env(cpu);
     int64_t bound;

     ctx->tbflags = ctx->base.tb->flags;
@@ -2917,7 +2917,7 @@ static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
-    CPUAlphaState *env = cpu->env_ptr;
+    CPUAlphaState *env = cpu_env(cpu);
     uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

     ctx->base.pc_next += 4;


@@ -38,7 +38,7 @@ static inline void common_semi_set_ret(CPUState *cs, target_ulong ret)
 static inline bool common_semi_sys_exit_extended(CPUState *cs, int nr)
 {
-    return (nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cs->env_ptr));
+    return nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cpu_env(cs));
 }

 static inline bool is_64bit_semihosting(CPUArchState *env)


@@ -31,18 +31,6 @@
 # define TARGET_PAGE_BITS_VARY
 # define TARGET_PAGE_BITS_MIN  10

-/*
- * Cache the attrs and shareability fields from the page table entry.
- *
- * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
- * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
- * For shareability and guarded, as in the SH and GP fields respectively
- * of the VMSAv8-64 PTEs.
- */
-# define TARGET_PAGE_ENTRY_EXTRA  \
-    uint8_t pte_attrs;            \
-    uint8_t shareability;         \
-    bool guarded;
 #endif

 #endif


@@ -80,7 +80,7 @@ void arm_cpu_synchronize_from_tb(CPUState *cs,
 {
     /* The program counter is always up to date with CF_PCREL. */
     if (!(tb_cflags(tb) & CF_PCREL)) {
-        CPUARMState *env = cs->env_ptr;
+        CPUARMState *env = cpu_env(cs);
         /*
          * It's OK to look at env for the current mode here, because it's
          * never possible for an AArch64 TB to chain to an AArch32 TB.
@@ -97,7 +97,7 @@ void arm_restore_state_to_opc(CPUState *cs,
                               const TranslationBlock *tb,
                               const uint64_t *data)
 {
-    CPUARMState *env = cs->env_ptr;
+    CPUARMState *env = cpu_env(cs);

     if (is_a64(env)) {
         if (tb_cflags(tb) & CF_PCREL) {
@@ -560,7 +560,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                      unsigned int cur_el, bool secure,
                                      uint64_t hcr_el2)
 {
-    CPUARMState *env = cs->env_ptr;
+    CPUARMState *env = cpu_env(cs);
     bool pstate_unmasked;
     bool unmasked = false;
@@ -690,7 +690,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
 static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
-    CPUARMState *env = cs->env_ptr;
+    CPUARMState *env = cpu_env(cs);
     uint32_t cur_el = arm_current_el(env);
     bool secure = arm_is_secure(env);
     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
@@ -1215,7 +1215,6 @@ static void arm_cpu_initfn(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);

-    cpu_set_cpustate_pointers(cpu);
     cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                          NULL, g_free);
@@ -2423,10 +2422,7 @@ void arm_cpu_register(const ARMCPUInfo *info)
 {
     TypeInfo type_info = {
         .parent = TYPE_ARM_CPU,
-        .instance_size = sizeof(ARMCPU),
-        .instance_align = __alignof__(ARMCPU),
         .instance_init = arm_cpu_instance_init,
-        .class_size = sizeof(ARMCPUClass),
         .class_init = info->class_init ?: cpu_register_class_init,
         .class_data = (void *)info,
     };


@@ -856,7 +856,6 @@ struct ArchCPU {
     CPUState parent_obj;
     /*< public >*/

-    CPUNegativeOffsetState neg;
     CPUARMState env;

     /* Coprocessor information */


@@ -822,9 +822,7 @@ void aarch64_cpu_register(const ARMCPUInfo *info)
 {
     TypeInfo type_info = {
         .parent = TYPE_AARCH64_CPU,
-        .instance_size = sizeof(ARMCPU),
         .instance_init = aarch64_cpu_instance_init,
-        .class_size = sizeof(ARMCPUClass),
         .class_init = info->class_init ?: cpu_register_class_init,
         .class_data = (void *)info,
     };
@@ -837,10 +835,8 @@ void aarch64_cpu_register(const ARMCPUInfo *info)
 static const TypeInfo aarch64_cpu_type_info = {
     .name = TYPE_AARCH64_CPU,
     .parent = TYPE_ARM_CPU,
-    .instance_size = sizeof(ARMCPU),
     .instance_finalize = aarch64_cpu_finalizefn,
     .abstract = true,
-    .class_size = sizeof(AArch64CPUClass),
     .class_init = aarch64_cpu_class_init,
 };


@@ -10297,7 +10297,7 @@ static const int8_t target_el_table[2][2][2][2][2][4] = {
 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                  uint32_t cur_el, bool secure)
 {
-    CPUARMState *env = cs->env_ptr;
+    CPUARMState *env = cpu_env(cs);
     bool rw;
     bool scr;
     bool hcr;


@@ -579,7 +579,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         }
         ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
         ptw->out_rw = full->prot & PAGE_WRITE;
-        pte_attrs = full->pte_attrs;
+        pte_attrs = full->extra.arm.pte_attrs;
         ptw->out_space = full->attrs.space;
 #else
         g_assert_not_reached();
@@ -2036,7 +2036,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
         if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
-            result->f.guarded = extract64(attrs, 50, 1); /* GP */
+            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
         }
     }


@@ -137,7 +137,7 @@ static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
         assert(!(flags & TLB_INVALID_MASK));

         /* If the virtual page MemAttr != Tagged, access unchecked. */
-        if (full->pte_attrs != 0xf0) {
+        if (full->extra.arm.pte_attrs != 0xf0) {
             return NULL;
         }


@@ -5373,7 +5373,7 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
     info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
 #else
     info->attrs = full->attrs;
-    info->tagged = full->pte_attrs == 0xf0;
+    info->tagged = full->extra.arm.pte_attrs == 0xf0;
 #endif

     /* Ensure that info->host[] is relative to addr, not addr + mem_off. */


@@ -334,8 +334,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
             address &= TARGET_PAGE_MASK;
         }

-        res.f.pte_attrs = res.cacheattrs.attrs;
-        res.f.shareability = res.cacheattrs.shareability;
+        res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
+        res.f.extra.arm.shareability = res.cacheattrs.shareability;
         tlb_set_page_full(cs, mmu_idx, address, &res.f);

         return true;
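The fields formerly injected into the TLB entry by TARGET_PAGE_ENTRY_EXTRA now live in a named extra.arm member of CPUTLBEntryFull. A hedged sketch of a consumer, assuming a CPUTLBEntryFull obtained elsewhere (e.g. via probe_access_full()); error handling is elided:

```c
/* Sketch: testing the per-arch TLB metadata on a resolved entry. */
static bool page_is_tagged(const CPUTLBEntryFull *full)
{
    /* MemAttr == 0xf0 marks a Tagged Normal page; see mte_helper.c above. */
    return full->extra.arm.pte_attrs == 0xf0;
}
```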


@@ -55,7 +55,7 @@ bool mve_skip_vmov(DisasContext *s, int vn, int index, int size);
 static inline TCGv_i32 load_cpu_offset(int offset)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
-    tcg_gen_ld_i32(tmp, cpu_env, offset);
+    tcg_gen_ld_i32(tmp, tcg_env, offset);
     return tmp;
 }

[File diff suppressed because it is too large]


@@ -115,7 +115,7 @@ static inline int vec_full_reg_offset(DisasContext *s, int regno)
 static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
+    tcg_gen_addi_ptr(ret, tcg_env, vec_full_reg_offset(s, regno));
     return ret;
 }
@@ -179,7 +179,7 @@ static inline int pred_gvec_reg_size(DisasContext *s)
 static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
+    tcg_gen_addi_ptr(ret, tcg_env, pred_full_reg_offset(s, regno));
     return ret;
 }
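
These two helpers show the second recurring pattern: materializing a pointer into the CPU state by adding a translation-time-constant offset to tcg_env, then handing that pointer to an out-of-line helper. A hypothetical call site, to make the flow concrete (gen_helper_example does not exist; it stands in for any pointer-taking helper):

/* sketch: pass SVE vector register Zn to a helper by pointer */
TCGv_ptr zn = vec_full_reg_ptr(s, a->rn);  /* zn = tcg_env + reg offset */
gen_helper_example(zn, tcg_constant_i32(desc));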


@@ -85,9 +85,9 @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
     fptr = load_reg(s, a->rn);
     if (a->l) {
-        gen_helper_v7m_vlldm(cpu_env, fptr);
+        gen_helper_v7m_vlldm(tcg_env, fptr);
     } else {
-        gen_helper_v7m_vlstm(cpu_env, fptr);
+        gen_helper_v7m_vlstm(tcg_env, fptr);
     }
     clear_eci_state(s);
@@ -322,7 +322,7 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
     switch (regno) {
     case ARM_VFP_FPSCR:
         tmp = loadfn(s, opaque, true);
-        gen_helper_vfp_set_fpscr(cpu_env, tmp);
+        gen_helper_vfp_set_fpscr(tcg_env, tmp);
         gen_lookup_tb(s);
         break;
     case ARM_VFP_FPSCR_NZCVQC:
@@ -391,7 +391,7 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                          R_V7M_CONTROL_SFPA_SHIFT, 1);
         store_cpu_field(control, v7m.control[M_REG_S]);
         tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
-        gen_helper_vfp_set_fpscr(cpu_env, tmp);
+        gen_helper_vfp_set_fpscr(tcg_env, tmp);
         s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
         break;
     }
@@ -451,12 +451,12 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
     switch (regno) {
     case ARM_VFP_FPSCR:
         tmp = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(tmp, cpu_env);
+        gen_helper_vfp_get_fpscr(tmp, tcg_env);
         storefn(s, opaque, tmp, true);
         break;
     case ARM_VFP_FPSCR_NZCVQC:
         tmp = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(tmp, cpu_env);
+        gen_helper_vfp_get_fpscr(tmp, tcg_env);
         tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
         storefn(s, opaque, tmp, true);
         break;
@@ -475,7 +475,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
         /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
         tmp = tcg_temp_new_i32();
         sfpa = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(tmp, cpu_env);
+        gen_helper_vfp_get_fpscr(tmp, tcg_env);
         tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
         control = load_cpu_field(v7m.control[M_REG_S]);
         tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
@@ -493,7 +493,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
             tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
             store_cpu_field(control, v7m.control[M_REG_S]);
             fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
-            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+            gen_helper_vfp_set_fpscr(tcg_env, fpscr);
             lookup_tb = true;
             break;
         }
@@ -528,7 +528,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
         tmp = tcg_temp_new_i32();
         sfpa = tcg_temp_new_i32();
         fpscr = tcg_temp_new_i32();
-        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
+        gen_helper_vfp_get_fpscr(fpscr, tcg_env);
         tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
         control = load_cpu_field(v7m.control[M_REG_S]);
         tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
@@ -540,7 +540,7 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
         fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
         tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0),
                             fpdscr, fpscr);
-        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+        gen_helper_vfp_set_fpscr(tcg_env, fpscr);
         break;
     }
     case ARM_VFP_VPR:
@@ -643,7 +643,7 @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
     }
     if (s->v8m_stackcheck && a->rn == 13 && a->w) {
-        gen_helper_v8m_stackcheck(cpu_env, addr);
+        gen_helper_v8m_stackcheck(tcg_env, addr);
     }
     if (do_access) {
@@ -682,7 +682,7 @@ static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
     }
     if (s->v8m_stackcheck && a->rn == 13 && a->w) {
-        gen_helper_v8m_stackcheck(cpu_env, addr);
+        gen_helper_v8m_stackcheck(tcg_env, addr);
     }
     if (do_access) {


@@ -56,7 +56,7 @@ static inline long mve_qreg_offset(unsigned reg)
 static TCGv_ptr mve_qreg_ptr(unsigned reg)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ret, cpu_env, mve_qreg_offset(reg));
+    tcg_gen_addi_ptr(ret, tcg_env, mve_qreg_offset(reg));
     return ret;
 }
@@ -173,7 +173,7 @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
     }
     qreg = mve_qreg_ptr(a->qd);
-    fn(cpu_env, qreg, addr);
+    fn(tcg_env, qreg, addr);
     /*
      * Writeback always happens after the last beat of the insn,
@@ -234,7 +234,7 @@ static bool do_ldst_sg(DisasContext *s, arg_vldst_sg *a, MVEGenLdStSGFn fn)
     qd = mve_qreg_ptr(a->qd);
     qm = mve_qreg_ptr(a->qm);
-    fn(cpu_env, qd, qm, addr);
+    fn(tcg_env, qd, qm, addr);
     mve_update_eci(s);
     return true;
 }
@@ -330,7 +330,7 @@ static bool do_ldst_sg_imm(DisasContext *s, arg_vldst_sg_imm *a,
     qd = mve_qreg_ptr(a->qd);
     qm = mve_qreg_ptr(a->qm);
-    fn(cpu_env, qd, qm, tcg_constant_i32(offset));
+    fn(tcg_env, qd, qm, tcg_constant_i32(offset));
     mve_update_eci(s);
     return true;
 }
@@ -397,7 +397,7 @@ static bool do_vldst_il(DisasContext *s, arg_vldst_il *a, MVEGenLdStIlFn *fn,
      * We pass the index of Qd, not a pointer, because the helper must
      * access multiple Q registers starting at Qd and working up.
      */
-    fn(cpu_env, tcg_constant_i32(a->qd), rn);
+    fn(tcg_env, tcg_constant_i32(a->qd), rn);
     if (a->w) {
         tcg_gen_addi_i32(rn, rn, addrinc);
@@ -491,7 +491,7 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
     } else {
         qd = mve_qreg_ptr(a->qd);
         tcg_gen_dup_i32(a->size, rt, rt);
-        gen_helper_mve_vdup(cpu_env, qd, rt);
+        gen_helper_mve_vdup(tcg_env, qd, rt);
     }
     mve_update_eci(s);
     return true;
@@ -517,7 +517,7 @@ static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn,
     } else {
         qd = mve_qreg_ptr(a->qd);
         qm = mve_qreg_ptr(a->qm);
-        fn(cpu_env, qd, qm);
+        fn(tcg_env, qd, qm);
     }
     mve_update_eci(s);
     return true;
@@ -612,7 +612,7 @@ static bool do_vcvt_rmode(DisasContext *s, arg_1op *a,
     qd = mve_qreg_ptr(a->qd);
     qm = mve_qreg_ptr(a->qm);
-    fn(cpu_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode)));
+    fn(tcg_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode)));
     mve_update_eci(s);
     return true;
 }
@@ -800,7 +800,7 @@ static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn,
         qd = mve_qreg_ptr(a->qd);
         qn = mve_qreg_ptr(a->qn);
         qm = mve_qreg_ptr(a->qm);
-        fn(cpu_env, qd, qn, qm);
+        fn(tcg_env, qd, qn, qm);
     }
     mve_update_eci(s);
     return true;
@@ -1052,7 +1052,7 @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
     qd = mve_qreg_ptr(a->qd);
     qn = mve_qreg_ptr(a->qn);
     rm = load_reg(s, a->rm);
-    fn(cpu_env, qd, qn, rm);
+    fn(tcg_env, qd, qn, rm);
     mve_update_eci(s);
     return true;
 }
@@ -1183,7 +1183,7 @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
         rda_i = tcg_constant_i64(0);
     }
-    fn(rda_o, cpu_env, qn, qm, rda_i);
+    fn(rda_o, tcg_env, qn, qm, rda_i);
     rdalo = tcg_temp_new_i32();
     rdahi = tcg_temp_new_i32();
@@ -1281,7 +1281,7 @@ static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
         rda_o = tcg_temp_new_i32();
     }
-    fn(rda_o, cpu_env, qn, qm, rda_i);
+    fn(rda_o, tcg_env, qn, qm, rda_i);
     store_reg(s, a->rda, rda_o);
     mve_update_eci(s);
@@ -1377,7 +1377,7 @@ static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a)
         return true;
     }
-    gen_helper_mve_vpnot(cpu_env);
+    gen_helper_mve_vpnot(tcg_env);
     /* This insn updates predication bits */
     s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
     mve_update_eci(s);
@@ -1419,7 +1419,7 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     }
     qm = mve_qreg_ptr(a->qm);
-    fns[a->size][a->u](rda_o, cpu_env, qm, rda_i);
+    fns[a->size][a->u](rda_o, tcg_env, qm, rda_i);
     store_reg(s, a->rda, rda_o);
     mve_update_eci(s);
@@ -1471,9 +1471,9 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
     qm = mve_qreg_ptr(a->qm);
     if (a->u) {
-        gen_helper_mve_vaddlv_u(rda_o, cpu_env, qm, rda_i);
+        gen_helper_mve_vaddlv_u(rda_o, tcg_env, qm, rda_i);
     } else {
-        gen_helper_mve_vaddlv_s(rda_o, cpu_env, qm, rda_i);
+        gen_helper_mve_vaddlv_s(rda_o, tcg_env, qm, rda_i);
     }
     rdalo = tcg_temp_new_i32();
@@ -1508,7 +1508,7 @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn,
                             imm, 16, 16);
     } else {
         qd = mve_qreg_ptr(a->qd);
-        fn(cpu_env, qd, tcg_constant_i64(imm));
+        fn(tcg_env, qd, tcg_constant_i64(imm));
     }
     mve_update_eci(s);
     return true;
@@ -1580,7 +1580,7 @@ static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
     } else {
         qd = mve_qreg_ptr(a->qd);
         qm = mve_qreg_ptr(a->qm);
-        fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+        fn(tcg_env, qd, qm, tcg_constant_i32(shift));
     }
     mve_update_eci(s);
     return true;
@@ -1685,7 +1685,7 @@ static bool do_2shift_scalar(DisasContext *s, arg_shl_scalar *a,
     qda = mve_qreg_ptr(a->qda);
     rm = load_reg(s, a->rm);
-    fn(cpu_env, qda, qda, rm);
+    fn(tcg_env, qda, qda, rm);
     mve_update_eci(s);
     return true;
 }
@@ -1827,7 +1827,7 @@ static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
     qd = mve_qreg_ptr(a->qd);
     rdm = load_reg(s, a->rdm);
-    gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+    gen_helper_mve_vshlc(rdm, tcg_env, qd, rdm, tcg_constant_i32(a->imm));
     store_reg(s, a->rdm, rdm);
     mve_update_eci(s);
     return true;
@@ -1856,7 +1856,7 @@ static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn)
     qd = mve_qreg_ptr(a->qd);
     rn = load_reg(s, a->rn);
-    fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm));
+    fn(rn, tcg_env, qd, rn, tcg_constant_i32(a->imm));
     store_reg(s, a->rn, rn);
     mve_update_eci(s);
     return true;
@@ -1891,7 +1891,7 @@ static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn)
     qd = mve_qreg_ptr(a->qd);
     rn = load_reg(s, a->rn);
     rm = load_reg(s, a->rm);
-    fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm));
+    fn(rn, tcg_env, qd, rn, rm, tcg_constant_i32(a->imm));
     store_reg(s, a->rn, rn);
     mve_update_eci(s);
     return true;
@@ -1957,7 +1957,7 @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
     qn = mve_qreg_ptr(a->qn);
     qm = mve_qreg_ptr(a->qm);
-    fn(cpu_env, qn, qm);
+    fn(tcg_env, qn, qm);
     if (a->mask) {
         /* VPT */
         gen_vpst(s, a->mask);
@@ -1988,7 +1988,7 @@ static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a,
     } else {
         rm = load_reg(s, a->rm);
     }
-    fn(cpu_env, qn, rm);
+    fn(tcg_env, qn, rm);
     if (a->mask) {
         /* VPT */
         gen_vpst(s, a->mask);
@@ -2089,7 +2089,7 @@ static bool do_vmaxv(DisasContext *s, arg_vmaxv *a, MVEGenVADDVFn fn)
     qm = mve_qreg_ptr(a->qm);
     rda = load_reg(s, a->rda);
-    fn(rda, cpu_env, qm, rda);
+    fn(rda, tcg_env, qm, rda);
     store_reg(s, a->rda, rda);
     mve_update_eci(s);
     return true;
@@ -2153,7 +2153,7 @@ static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn)
     qm = mve_qreg_ptr(a->qm);
     qn = mve_qreg_ptr(a->qn);
     rda = load_reg(s, a->rda);
-    fn(rda, cpu_env, qn, qm, rda);
+    fn(rda, tcg_env, qn, qm, rda);
     store_reg(s, a->rda, rda);
     mve_update_eci(s);
     return true;
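
All of the MVE trans_*/do_* changes above share one skeleton: build pointers to the Q registers involved, then call the helper with tcg_env first so it can consult the whole CPU state (VPR predication, FPSCR, ECI). Reduced to a sketch modeled on do_1op_vec() above (names are illustrative, not code from the patch):

static bool do_mve_2op_sketch(DisasContext *s, int qd, int qm,
                              MVEGenOneOpFn *fn)
{
    TCGv_ptr pd = mve_qreg_ptr(qd);   /* tcg_env + offset of Qd */
    TCGv_ptr pm = mve_qreg_ptr(qm);
    fn(tcg_env, pd, pm);              /* helper sees env plus both regs */
    mve_update_eci(s);                /* advance beat-wise execution state */
    return true;
}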


@@ -32,7 +32,7 @@
 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
+    tcg_gen_addi_ptr(ret, tcg_env, vfp_reg_offset(dp, reg));
     return ret;
 }
@@ -42,13 +42,13 @@ static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
     switch (mop) {
     case MO_UB:
-        tcg_gen_ld8u_i32(var, cpu_env, offset);
+        tcg_gen_ld8u_i32(var, tcg_env, offset);
         break;
     case MO_UW:
-        tcg_gen_ld16u_i32(var, cpu_env, offset);
+        tcg_gen_ld16u_i32(var, tcg_env, offset);
         break;
     case MO_UL:
-        tcg_gen_ld_i32(var, cpu_env, offset);
+        tcg_gen_ld_i32(var, tcg_env, offset);
         break;
     default:
         g_assert_not_reached();
@@ -61,16 +61,16 @@ static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
     switch (mop) {
     case MO_UB:
-        tcg_gen_ld8u_i64(var, cpu_env, offset);
+        tcg_gen_ld8u_i64(var, tcg_env, offset);
         break;
     case MO_UW:
-        tcg_gen_ld16u_i64(var, cpu_env, offset);
+        tcg_gen_ld16u_i64(var, tcg_env, offset);
         break;
     case MO_UL:
-        tcg_gen_ld32u_i64(var, cpu_env, offset);
+        tcg_gen_ld32u_i64(var, tcg_env, offset);
         break;
     case MO_UQ:
-        tcg_gen_ld_i64(var, cpu_env, offset);
+        tcg_gen_ld_i64(var, tcg_env, offset);
         break;
     default:
         g_assert_not_reached();
@@ -83,13 +83,13 @@ static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
     switch (size) {
     case MO_8:
-        tcg_gen_st8_i32(var, cpu_env, offset);
+        tcg_gen_st8_i32(var, tcg_env, offset);
         break;
     case MO_16:
-        tcg_gen_st16_i32(var, cpu_env, offset);
+        tcg_gen_st16_i32(var, tcg_env, offset);
         break;
     case MO_32:
-        tcg_gen_st_i32(var, cpu_env, offset);
+        tcg_gen_st_i32(var, tcg_env, offset);
         break;
     default:
         g_assert_not_reached();
@@ -102,16 +102,16 @@ static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
     switch (size) {
     case MO_8:
-        tcg_gen_st8_i64(var, cpu_env, offset);
+        tcg_gen_st8_i64(var, tcg_env, offset);
         break;
     case MO_16:
-        tcg_gen_st16_i64(var, cpu_env, offset);
+        tcg_gen_st16_i64(var, tcg_env, offset);
         break;
     case MO_32:
-        tcg_gen_st32_i64(var, cpu_env, offset);
+        tcg_gen_st32_i64(var, tcg_env, offset);
         break;
     case MO_64:
-        tcg_gen_st_i64(var, cpu_env, offset);
+        tcg_gen_st_i64(var, tcg_env, offset);
         break;
     default:
         g_assert_not_reached();
@@ -296,7 +296,7 @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
     tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                        vfp_reg_offset(a->q, a->vn),
                        vfp_reg_offset(a->q, a->vm),
-                       cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
+                       tcg_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
                        gen_helper_gvec_fmlal_a32);
     return true;
 }
@@ -390,7 +390,7 @@ static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
     tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                        vfp_reg_offset(a->q, a->vn),
                        vfp_reg_offset(a->q, a->rm),
-                       cpu_env, opr_sz, opr_sz,
+                       tcg_env, opr_sz, opr_sz,
                        (a->index << 2) | a->s, /* is_2 == 0 */
                        gen_helper_gvec_fmlal_idx_a32);
     return true;
@@ -920,7 +920,7 @@ DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)
 #define DO_3SAME_64_ENV(INSN, FUNC)                                     \
     static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)    \
     {                                                                   \
-        FUNC(d, cpu_env, n, m);                                         \
+        FUNC(d, tcg_env, n, m);                                         \
     }                                                                   \
     DO_3SAME_64(INSN, gen_##INSN##_elt)
@@ -953,7 +953,7 @@ DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
 }
 /*
- * Some helper functions need to be passed the cpu_env. In order
+ * Some helper functions need to be passed the tcg_env. In order
  * to use those with the gvec APIs like tcg_gen_gvec_3() we need
  * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
  * and which call a NeonGenTwoOpEnvFn().
@@ -961,7 +961,7 @@ DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
 #define WRAP_ENV_FN(WRAPNAME, FUNC)                                     \
     static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m)            \
     {                                                                   \
-        FUNC(d, cpu_env, n, m);                                         \
+        FUNC(d, tcg_env, n, m);                                         \
     }
 #define DO_3SAME_32_ENV(INSN, FUNC)                                     \
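
The comment above describes an adapter trick worth spelling out: gvec expects callbacks without an env argument (NeonGenTwoOpFn), so helpers that do need env are wrapped in a macro-generated thunk that supplies tcg_env. Schematically, WRAP_ENV_FN(gen_VQADD_s8, gen_helper_neon_qadd_s8) expands to (instance chosen for illustration):

static void gen_VQADD_s8(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m)
{
    gen_helper_neon_qadd_s8(d, tcg_env, n, m);
}
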
@@ -1305,7 +1305,7 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
 {
     /*
      * 2-reg-and-shift operations, size == 3 case, where the
-     * function needs to be passed cpu_env.
+     * function needs to be passed tcg_env.
      */
     TCGv_i64 constimm;
     int pass;
@@ -1338,7 +1338,7 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
         TCGv_i64 tmp = tcg_temp_new_i64();
         read_neon_element64(tmp, a->vm, pass, MO_64);
-        fn(tmp, cpu_env, tmp, constimm);
+        fn(tmp, tcg_env, tmp, constimm);
         write_neon_element64(tmp, a->vd, pass, MO_64);
     }
     return true;
@@ -1349,7 +1349,7 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
 {
     /*
      * 2-reg-and-shift operations, size < 3 case, where the
-     * helper needs to be passed cpu_env.
+     * helper needs to be passed tcg_env.
      */
     TCGv_i32 constimm, tmp;
     int pass;
@@ -1381,7 +1381,7 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
     for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
         read_neon_element32(tmp, a->vm, pass, MO_32);
-        fn(tmp, cpu_env, tmp, constimm);
+        fn(tmp, tcg_env, tmp, constimm);
         write_neon_element32(tmp, a->vd, pass, MO_32);
     }
     return true;
@@ -1447,11 +1447,11 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
     read_neon_element64(rm2, a->vm, 1, MO_64);
     shiftfn(rm1, rm1, constimm);
-    narrowfn(rd, cpu_env, rm1);
+    narrowfn(rd, tcg_env, rm1);
     write_neon_element32(rd, a->vd, 0, MO_32);
     shiftfn(rm2, rm2, constimm);
-    narrowfn(rd, cpu_env, rm2);
+    narrowfn(rd, tcg_env, rm2);
     write_neon_element32(rd, a->vd, 1, MO_32);
     return true;
@@ -1514,7 +1514,7 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
     tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
-    narrowfn(rm1, cpu_env, rtmp);
+    narrowfn(rm1, tcg_env, rtmp);
     write_neon_element32(rm1, a->vd, 0, MO_32);
     shiftfn(rm3, rm3, constimm);
@@ -1522,7 +1522,7 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
     tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
-    narrowfn(rm3, cpu_env, rtmp);
+    narrowfn(rm3, tcg_env, rtmp);
     write_neon_element32(rm3, a->vd, 1, MO_32);
     return true;
 }
@@ -2159,13 +2159,13 @@ DO_VMLAL(VMLSL_U,mull_u,sub)
 static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
 {
     gen_helper_neon_mull_s16(rd, rn, rm);
-    gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
+    gen_helper_neon_addl_saturate_s32(rd, tcg_env, rd, rd);
 }
 static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
 {
     gen_mull_s32(rd, rn, rm);
-    gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
+    gen_helper_neon_addl_saturate_s64(rd, tcg_env, rd, rd);
 }
 static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
@@ -2182,12 +2182,12 @@ static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
 static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
 {
-    gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
+    gen_helper_neon_addl_saturate_s32(rd, tcg_env, rn, rm);
 }
 static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
 {
-    gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
+    gen_helper_neon_addl_saturate_s64(rd, tcg_env, rn, rm);
 }
 static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
@@ -2211,13 +2211,13 @@ static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
 static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
 {
     gen_helper_neon_negl_u32(rm, rm);
-    gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
+    gen_helper_neon_addl_saturate_s32(rd, tcg_env, rn, rm);
 }
 static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
 {
     tcg_gen_neg_i64(rm, rm);
-    gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
+    gen_helper_neon_addl_saturate_s64(rd, tcg_env, rn, rm);
 }
 static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
@@ -2550,7 +2550,7 @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
     for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
         read_neon_element32(rn, a->vn, pass, MO_32);
         read_neon_element32(rd, a->vd, pass, MO_32);
-        opfn(rd, cpu_env, rn, scalar, rd);
+        opfn(rd, tcg_env, rn, scalar, rd);
         write_neon_element32(rd, a->vd, pass, MO_32);
     }
     return true;
@@ -2837,7 +2837,7 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
     val = tcg_temp_new_i64();
     read_neon_element64(val, a->vm, 0, MO_64);
-    gen_helper_neon_tbl(val, cpu_env, desc, val, def);
+    gen_helper_neon_tbl(val, tcg_env, desc, val, def);
     write_neon_element64(val, a->vd, 0, MO_64);
     return true;
 }
@@ -3171,9 +3171,9 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
     rd1 = tcg_temp_new_i32();
     read_neon_element64(rm, a->vm, 0, MO_64);
-    narrowfn(rd0, cpu_env, rm);
+    narrowfn(rd0, tcg_env, rm);
     read_neon_element64(rm, a->vm, 1, MO_64);
-    narrowfn(rd1, cpu_env, rm);
+    narrowfn(rd1, tcg_env, rm);
     write_neon_element32(rd0, a->vd, 0, MO_32);
     write_neon_element32(rd1, a->vd, 1, MO_32);
     return true;
@@ -3625,7 +3625,7 @@ static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a)
 #define WRAP_1OP_ENV_FN(WRAPNAME, FUNC)                 \
     static void WRAPNAME(TCGv_i32 d, TCGv_i32 m)        \
     {                                                   \
-        FUNC(d, cpu_env, m);                            \
+        FUNC(d, tcg_env, m);                            \
     }
 WRAP_1OP_ENV_FN(gen_VQABS_s8, gen_helper_neon_qabs_s8)


@@ -90,7 +90,7 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
     /* Add the byte offset to env to produce the final pointer. */
     addr = tcg_temp_new_ptr();
     tcg_gen_ext_i32_ptr(addr, tmp);
-    tcg_gen_add_ptr(addr, addr, cpu_env);
+    tcg_gen_add_ptr(addr, addr, tcg_env);
     return addr;
 }
@@ -106,7 +106,7 @@ static TCGv_ptr get_tile(DisasContext *s, int esz, int tile)
     offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, zarray);
-    tcg_gen_addi_ptr(addr, cpu_env, offset);
+    tcg_gen_addi_ptr(addr, tcg_env, offset);
     return addr;
 }
@@ -116,7 +116,7 @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
         return false;
     }
     if (sme_za_enabled_check(s)) {
-        gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
+        gen_helper_sme_zero(tcg_env, tcg_constant_i32(a->imm),
                             tcg_constant_i32(streaming_vec_reg_size(s)));
     }
     return true;
@@ -237,7 +237,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
     svl = streaming_vec_reg_size(s);
     desc = simd_desc(svl, svl, desc);
-    fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
+    fns[a->esz][be][a->v][mte][a->st](tcg_env, t_za, t_pg, addr,
                                       tcg_constant_i32(desc));
     return true;
 }
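
get_tile_rowcol() above is the variant where the env offset is known only at run time: the selected tile row/column's byte offset is computed as a TCG value, widened to pointer width, and added to tcg_env. In outline (offset_i32 stands for the value computed earlier from rs and esz):

TCGv_ptr addr = tcg_temp_new_ptr();
tcg_gen_ext_i32_ptr(addr, offset_i32);  /* widen byte offset to pointer */
tcg_gen_add_ptr(addr, addr, tcg_env);   /* addr = env + offset */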


@@ -497,8 +497,8 @@ static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
     TCGv_ptr gptr = tcg_temp_new_ptr();
     TCGv_i32 t = tcg_temp_new_i32();
-    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
-    tcg_gen_addi_ptr(gptr, cpu_env, gofs);
+    tcg_gen_addi_ptr(dptr, tcg_env, dofs);
+    tcg_gen_addi_ptr(gptr, tcg_env, gofs);
     gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));
@@ -956,8 +956,8 @@ static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
     t_zn = tcg_temp_new_ptr();
     t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
     fn(temp, t_zn, t_pg, desc);
     write_fp_dreg(s, a->rd, temp);
@@ -1209,7 +1209,7 @@ static bool do_index(DisasContext *s, int esz, int rd,
     desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
     t_zd = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
+    tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd));
     if (esz == 3) {
         gen_helper_sve_index_d(t_zd, start, incr, desc);
     } else {
@@ -1379,12 +1379,12 @@ static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
         TCGv_i64 pm = tcg_temp_new_i64();
         TCGv_i64 pg = tcg_temp_new_i64();
-        tcg_gen_ld_i64(pn, cpu_env, nofs);
-        tcg_gen_ld_i64(pm, cpu_env, mofs);
-        tcg_gen_ld_i64(pg, cpu_env, gofs);
+        tcg_gen_ld_i64(pn, tcg_env, nofs);
+        tcg_gen_ld_i64(pm, tcg_env, mofs);
+        tcg_gen_ld_i64(pg, tcg_env, gofs);
         gvec_op->fni8(pd, pn, pm, pg);
-        tcg_gen_st_i64(pd, cpu_env, dofs);
+        tcg_gen_st_i64(pd, tcg_env, dofs);
         do_predtest1(pd, pg);
     } else {
@@ -1654,8 +1654,8 @@ static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
         TCGv_i64 pn = tcg_temp_new_i64();
         TCGv_i64 pg = tcg_temp_new_i64();
-        tcg_gen_ld_i64(pn, cpu_env, nofs);
-        tcg_gen_ld_i64(pg, cpu_env, gofs);
+        tcg_gen_ld_i64(pn, tcg_env, nofs);
+        tcg_gen_ld_i64(pg, tcg_env, gofs);
         do_predtest1(pn, pg);
     } else {
         do_predtest(s, nofs, gofs, words);
@@ -1736,7 +1736,7 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
     t = tcg_temp_new_i64();
     if (fullsz <= 64) {
         tcg_gen_movi_i64(t, lastword);
-        tcg_gen_st_i64(t, cpu_env, ofs);
+        tcg_gen_st_i64(t, tcg_env, ofs);
         goto done;
     }
@@ -1755,17 +1755,17 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
     tcg_gen_movi_i64(t, word);
     for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
-        tcg_gen_st_i64(t, cpu_env, ofs + i);
+        tcg_gen_st_i64(t, tcg_env, ofs + i);
     }
     if (lastword != word) {
         tcg_gen_movi_i64(t, lastword);
-        tcg_gen_st_i64(t, cpu_env, ofs + i);
+        tcg_gen_st_i64(t, tcg_env, ofs + i);
         i += 8;
     }
     if (i < fullsz) {
         tcg_gen_movi_i64(t, 0);
         for (; i < fullsz; i += 8) {
-            tcg_gen_st_i64(t, cpu_env, ofs + i);
+            tcg_gen_st_i64(t, tcg_env, ofs + i);
         }
     }
@@ -1822,8 +1822,8 @@ static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
     desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
     desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
-    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->rn));
     t = tcg_temp_new_i32();
     gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));
@@ -1919,8 +1919,8 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
     dptr = tcg_temp_new_ptr();
     nptr = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
-    tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
+    tcg_gen_addi_ptr(dptr, tcg_env, vec_full_reg_offset(s, rd));
+    tcg_gen_addi_ptr(nptr, tcg_env, vec_full_reg_offset(s, rn));
     desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
     switch (esz) {
@@ -2163,9 +2163,9 @@ static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
     TCGv_ptr t_zn = tcg_temp_new_ptr();
     TCGv_ptr t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
-    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+    tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd));
+    tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, rn));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
     fns[esz](t_zd, t_zn, t_pg, val, desc);
 }
@@ -2310,8 +2310,8 @@ static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
     TCGv_ptr t_zd = tcg_temp_new_ptr();
     TCGv_ptr t_zn = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
     fns[a->esz](t_zd, t_zn, val, desc);
 }
@@ -2323,7 +2323,7 @@ static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
     }
     if (sve_access_check(s)) {
         TCGv_i64 t = tcg_temp_new_i64();
-        tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
+        tcg_gen_ld_i64(t, tcg_env, vec_reg_offset(s, a->rm, 0, MO_64));
         do_insr_i64(s, a, t);
     }
     return true;
@@ -2409,9 +2409,9 @@ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
     desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
     desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
-    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
+    tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(t_m, tcg_env, pred_full_reg_offset(s, a->rm));
     fn(t_d, t_n, t_m, tcg_constant_i32(desc));
     return true;
@@ -2429,8 +2429,8 @@ static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
     TCGv_ptr t_n = tcg_temp_new_ptr();
     uint32_t desc = 0;
-    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn));
     desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
     desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
@@ -2525,7 +2525,7 @@ static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
     desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
     desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
-    tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
+    tcg_gen_addi_ptr(t_p, tcg_env, pred_full_reg_offset(s, pg));
     gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc));
 }
@@ -2602,7 +2602,7 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
     }
 #endif
     tcg_gen_ext_i32_ptr(p, last);
-    tcg_gen_add_ptr(p, p, cpu_env);
+    tcg_gen_add_ptr(p, p, tcg_env);
     return load_esz(p, vec_full_reg_offset(s, rm), esz);
 }
@@ -2674,7 +2674,7 @@ static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
     }
     /* The conceit here is that while last < 0 indicates not found, after
-     * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
+     * adjusting for tcg_env->vfp.zregs[rm], it is still a valid address
      * from which we can load garbage. We then discard the garbage with
      * a conditional move.
      */
@@ -2690,7 +2690,7 @@ static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
     if (sve_access_check(s)) {
         int esz = a->esz;
         int ofs = vec_reg_offset(s, a->rd, 0, esz);
-        TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
+        TCGv_i64 reg = load_esz(tcg_env, ofs, esz);
         do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
         write_fp_dreg(s, a->rd, reg);
@@ -2794,7 +2794,7 @@ static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
     }
     if (sve_access_check(s)) {
         int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
-        TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
+        TCGv_i64 t = load_esz(tcg_env, ofs, a->esz);
         do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
     }
     return true;
@@ -2847,10 +2847,10 @@ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
     zm = tcg_temp_new_ptr();
     pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
-    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(zm, tcg_env, vec_full_reg_offset(s, a->rm));
+    tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg));
     gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0)));
@@ -2920,9 +2920,9 @@ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
     zn = tcg_temp_new_ptr();
     pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg));
     gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm)));
@@ -2971,10 +2971,10 @@ static bool do_brk3(DisasContext *s, arg_rprr_s *a,
     TCGv_ptr g = tcg_temp_new_ptr();
     TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
-    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
-    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(m, tcg_env, pred_full_reg_offset(s, a->rm));
+    tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg));
     if (a->s) {
         TCGv_i32 t = tcg_temp_new_i32();
@@ -3001,9 +3001,9 @@ static bool do_brk2(DisasContext *s, arg_rpr_s *a,
     TCGv_ptr g = tcg_temp_new_ptr();
     TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
-    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
-    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg));
     if (a->s) {
         TCGv_i32 t = tcg_temp_new_i32();
@@ -3044,10 +3044,10 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
     if (psz <= 8) {
         uint64_t psz_mask;
-        tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
+        tcg_gen_ld_i64(val, tcg_env, pred_full_reg_offset(s, pn));
         if (pn != pg) {
             TCGv_i64 g = tcg_temp_new_i64();
-            tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
+            tcg_gen_ld_i64(g, tcg_env, pred_full_reg_offset(s, pg));
             tcg_gen_and_i64(val, val, g);
         }
@@ -3066,8 +3066,8 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
         desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
         desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
-        tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
-        tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+        tcg_gen_addi_ptr(t_pn, tcg_env, pred_full_reg_offset(s, pn));
+        tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
         gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc));
     }
@@ -3291,7 +3291,7 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
     desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
     ptr = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
     if (a->lt) {
         gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
@@ -3354,7 +3354,7 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
     desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
     ptr = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+    tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
     gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
     do_pred_flags(t2);
@@ -3684,8 +3684,8 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a,
     t_zn = tcg_temp_new_ptr();
     t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
     status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
     fn(temp, t_zn, t_pg, status, t_desc);
@@ -3802,11 +3802,11 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
         return true;
     }
-    t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
+    t_val = load_esz(tcg_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
     t_rm = tcg_temp_new_ptr();
     t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+    tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
     t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
     t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
@@ -3878,9 +3878,9 @@ static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
     t_zd = tcg_temp_new_ptr();
     t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
-    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+    tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd));
+    tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
     status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
     desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
@@ -4228,7 +4228,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
     /*
      * Predicate register loads can be any multiple of 2.
-     * Note that we still store the entire 64-bit unit into cpu_env.
+     * Note that we still store the entire 64-bit unit into tcg_env.
      */
     if (len_remain >= 8) {
         t0 = tcg_temp_new_i64();
@@ -4370,7 +4370,7 @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = vec_full_reg_size(s);
         int off = vec_full_reg_offset(s, a->rd);
-        gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
+        gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -4383,7 +4383,7 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = pred_full_reg_size(s);
         int off = pred_full_reg_offset(s, a->rd);
-        gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
+        gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -4396,7 +4396,7 @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = vec_full_reg_size(s);
         int off = vec_full_reg_offset(s, a->rd);
-        gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
+        gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -4409,7 +4409,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = pred_full_reg_size(s);
         int off = pred_full_reg_offset(s, a->rd);
-        gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
+        gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -4465,8 +4465,8 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
     desc = simd_desc(vsz, vsz, zt | desc);
     t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
-    fn(cpu_env, t_pg, addr, tcg_constant_i32(desc));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
+    fn(tcg_env, t_pg, addr, tcg_constant_i32(desc));
 }
 /* Indexed by [mte][be][dtype][nreg] */
@@ -4860,18 +4860,18 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
 #if HOST_BIG_ENDIAN
         poff += 6;
 #endif
-        tcg_gen_ld16u_i64(tmp, cpu_env, poff);
+        tcg_gen_ld16u_i64(tmp, tcg_env, poff);
         poff = offsetof(CPUARMState, vfp.preg_tmp);
-        tcg_gen_st_i64(tmp, cpu_env, poff);
+        tcg_gen_st_i64(tmp, tcg_env, poff);
     }
     t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+    tcg_gen_addi_ptr(t_pg, tcg_env, poff);
     gen_helper_gvec_mem *fn
         = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
-    fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
+    fn(tcg_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
     /* Replicate that first quadword. */
     if (vsz > 16) {
@@ -4939,18 +4939,18 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
 #if HOST_BIG_ENDIAN
         poff += 4;
 #endif
-        tcg_gen_ld32u_i64(tmp, cpu_env, poff);
+        tcg_gen_ld32u_i64(tmp, tcg_env, poff);
         poff = offsetof(CPUARMState, vfp.preg_tmp);
-        tcg_gen_st_i64(tmp, cpu_env, poff);
+        tcg_gen_st_i64(tmp, tcg_env, poff);
     }
     t_pg = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+    tcg_gen_addi_ptr(t_pg, tcg_env, poff);
     gen_helper_gvec_mem *fn
         = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
-    fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
+    fn(tcg_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
     /*
      * Replicate that first octaword.
@@ -5027,7 +5027,7 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
         */
         uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
         temp = tcg_temp_new_i64();
-        tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
+        tcg_gen_ld_i64(temp, tcg_env, pred_full_reg_offset(s, a->pg));
         tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
         tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
     } else {
@@ -5238,10 +5238,10 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
     }
     desc = simd_desc(vsz, vsz, desc | scale);
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
-    tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
-    tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
-    fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
+    tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
+    tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm));
+    tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt));
+    fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
 }
 /* Indexed by [mte][be][ff][xs][u][msz]. */
@@ -7197,7 +7197,7 @@ static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
 {
     return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
                              a->rd, a->rn, a->rm, a->ra,
-                             (sel << 1) | sub, cpu_env);
+                             (sel << 1) | sub, tcg_env);
 }
 TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false)
@@ -7209,7 +7209,7 @@ static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
 {
     return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
                              a->rd, a->rn, a->rm, a->ra,
-                             (a->index << 2) | (sel << 1) | sub, cpu_env);
+                             (a->index << 2) | (sel << 1) | sub, tcg_env);
 }
 TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false)
@@ -7289,7 +7289,7 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
     /* Load the predicate word. */
     tcg_gen_trunc_i64_ptr(ptr, didx);
-    tcg_gen_add_ptr(ptr, ptr, cpu_env);
+    tcg_gen_add_ptr(ptr, ptr, tcg_env);
     tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
     /* Extract the predicate bit and replicate to MO_64. */
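
The PSEL hunk ends on the same runtime-offset idiom applied to predicates: the element index (didx, computed earlier in the function) is truncated to pointer width, biased by tcg_env, and the predicate byte is then loaded at the register's fixed offset from that pointer — effectively base-plus-register-plus-index addressing into env:

tcg_gen_trunc_i64_ptr(ptr, didx);    /* index -> pointer width */
tcg_gen_add_ptr(ptr, ptr, tcg_env);  /* ptr = env + index */
tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));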


@@ -30,22 +30,22 @@
 
 static inline void vfp_load_reg64(TCGv_i64 var, int reg)
 {
-    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
+    tcg_gen_ld_i64(var, tcg_env, vfp_reg_offset(true, reg));
 }
 
 static inline void vfp_store_reg64(TCGv_i64 var, int reg)
 {
-    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
+    tcg_gen_st_i64(var, tcg_env, vfp_reg_offset(true, reg));
 }
 
 static inline void vfp_load_reg32(TCGv_i32 var, int reg)
 {
-    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
+    tcg_gen_ld_i32(var, tcg_env, vfp_reg_offset(false, reg));
 }
 
 static inline void vfp_store_reg32(TCGv_i32 var, int reg)
 {
-    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
+    tcg_gen_st_i32(var, tcg_env, vfp_reg_offset(false, reg));
 }
 
 /*
@@ -116,7 +116,7 @@ static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
     if (translator_io_start(&s->base)) {
         s->base.is_jmp = DISAS_UPDATE_EXIT;
     }
-    gen_helper_v7m_preserve_fp_state(cpu_env);
+    gen_helper_v7m_preserve_fp_state(tcg_env);
     /*
      * If the preserve_fp_state helper doesn't throw an exception
      * then it will clear LSPACT; we don't need to repeat this for
@@ -172,7 +172,7 @@ static void gen_update_fp_context(DisasContext *s)
         uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
 
         fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
-        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+        gen_helper_vfp_set_fpscr(tcg_env, fpscr);
         if (dc_isar_feature(aa32_mve, s)) {
             store_cpu_field(tcg_constant_i32(0), v7m.vpr);
         }
@@ -815,7 +815,7 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
         if (s->current_el == 1) {
             gen_set_condexec(s);
             gen_update_pc(s, 0);
-            gen_helper_check_hcr_el2_trap(cpu_env,
+            gen_helper_check_hcr_el2_trap(tcg_env,
                                           tcg_constant_i32(a->rt),
                                           tcg_constant_i32(a->reg));
         }
@@ -831,7 +831,7 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
             tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
         } else {
             tmp = tcg_temp_new_i32();
-            gen_helper_vfp_get_fpscr(tmp, cpu_env);
+            gen_helper_vfp_get_fpscr(tmp, tcg_env);
         }
         break;
     default:
@@ -855,7 +855,7 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
         break;
     case ARM_VFP_FPSCR:
         tmp = load_reg(s, a->rt);
-        gen_helper_vfp_set_fpscr(cpu_env, tmp);
+        gen_helper_vfp_set_fpscr(tcg_env, tmp);
         gen_lookup_tb(s);
         break;
     case ARM_VFP_FPEXC:
@@ -1169,7 +1169,7 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
          * value is above, it is UNKNOWN whether the limit check
          * triggers; we choose to trigger.
          */
-        gen_helper_v8m_stackcheck(cpu_env, addr);
+        gen_helper_v8m_stackcheck(tcg_env, addr);
     }
 
     offset = 4;
@@ -1252,7 +1252,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
          * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
          */
-        gen_helper_v8m_stackcheck(cpu_env, addr);
+        gen_helper_v8m_stackcheck(tcg_env, addr);
     }
 
     offset = 8;
@@ -2419,17 +2419,17 @@ DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)
 
 static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
 {
-    gen_helper_vfp_sqrth(vd, vm, cpu_env);
+    gen_helper_vfp_sqrth(vd, vm, tcg_env);
 }
 
 static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
 {
-    gen_helper_vfp_sqrts(vd, vm, cpu_env);
+    gen_helper_vfp_sqrts(vd, vm, tcg_env);
 }
 
 static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
 {
-    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
+    gen_helper_vfp_sqrtd(vd, vm, tcg_env);
 }
 
 DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
@@ -2464,9 +2464,9 @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
     }
 
     if (a->e) {
-        gen_helper_vfp_cmpeh(vd, vm, cpu_env);
+        gen_helper_vfp_cmpeh(vd, vm, tcg_env);
     } else {
-        gen_helper_vfp_cmph(vd, vm, cpu_env);
+        gen_helper_vfp_cmph(vd, vm, tcg_env);
     }
     return true;
 }
@@ -2499,9 +2499,9 @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
     }
 
     if (a->e) {
-        gen_helper_vfp_cmpes(vd, vm, cpu_env);
+        gen_helper_vfp_cmpes(vd, vm, tcg_env);
     } else {
-        gen_helper_vfp_cmps(vd, vm, cpu_env);
+        gen_helper_vfp_cmps(vd, vm, tcg_env);
     }
     return true;
 }
@@ -2539,9 +2539,9 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
     }
 
     if (a->e) {
-        gen_helper_vfp_cmped(vd, vm, cpu_env);
+        gen_helper_vfp_cmped(vd, vm, tcg_env);
     } else {
-        gen_helper_vfp_cmpd(vd, vm, cpu_env);
+        gen_helper_vfp_cmpd(vd, vm, tcg_env);
     }
     return true;
 }
@@ -2564,7 +2564,7 @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
     ahp_mode = get_ahp_flag();
     tmp = tcg_temp_new_i32();
     /* The T bit tells us if we want the low or high 16 bits of Vm */
-    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+    tcg_gen_ld16u_i32(tmp, tcg_env, vfp_f16_offset(a->vm, a->t));
     gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
     vfp_store_reg32(tmp, a->vd);
     return true;
@@ -2598,7 +2598,7 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
     ahp_mode = get_ahp_flag();
     tmp = tcg_temp_new_i32();
     /* The T bit tells us if we want the low or high 16 bits of Vm */
-    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+    tcg_gen_ld16u_i32(tmp, tcg_env, vfp_f16_offset(a->vm, a->t));
     vd = tcg_temp_new_i64();
     gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
     vfp_store_reg64(vd, a->vd);
@@ -2623,7 +2623,7 @@ static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
 
     vfp_load_reg32(tmp, a->vm);
     gen_helper_bfcvt(tmp, tmp, fpst);
-    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+    tcg_gen_st16_i32(tmp, tcg_env, vfp_f16_offset(a->vd, a->t));
     return true;
 }
@@ -2647,7 +2647,7 @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
 
     vfp_load_reg32(tmp, a->vm);
     gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
-    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+    tcg_gen_st16_i32(tmp, tcg_env, vfp_f16_offset(a->vd, a->t));
     return true;
 }
@@ -2682,7 +2682,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
 
     vfp_load_reg64(vm, a->vm);
     gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
-    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+    tcg_gen_st16_i32(tmp, tcg_env, vfp_f16_offset(a->vd, a->t));
     return true;
 }
@@ -2932,7 +2932,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
     vm = tcg_temp_new_i32();
     vd = tcg_temp_new_i64();
     vfp_load_reg32(vm, a->vm);
-    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
+    gen_helper_vfp_fcvtds(vd, vm, tcg_env);
     vfp_store_reg64(vd, a->vd);
     return true;
 }
@@ -2958,7 +2958,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
     vd = tcg_temp_new_i32();
     vm = tcg_temp_new_i64();
     vfp_load_reg64(vm, a->vm);
-    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
+    gen_helper_vfp_fcvtsd(vd, vm, tcg_env);
     vfp_store_reg32(vd, a->vd);
     return true;
 }
@@ -3076,7 +3076,7 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
     vm = tcg_temp_new_i64();
     vd = tcg_temp_new_i32();
     vfp_load_reg64(vm, a->vm);
-    gen_helper_vjcvt(vd, vm, cpu_env);
+    gen_helper_vjcvt(vd, vm, tcg_env);
     vfp_store_reg32(vd, a->vd);
     return true;
 }
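Two distinct idioms carry the renamed pointer through these VFP hunks: tcg_env passed as the env argument of a gen_helper_* call, and tcg_env used as the base address for translation-time pointer arithmetic into CPUARMState. A sketch of the second idiom under the same assumptions as before; example_vpr_ptr is hypothetical:

    /* Build a TCGv_ptr to a CPUARMState field; tcg_gen_addi_ptr simply
     * adds a compile-time offset to the env pointer at translate time. */
    static TCGv_ptr example_vpr_ptr(void)
    {
        TCGv_ptr p = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(p, tcg_env, offsetof(CPUARMState, v7m.vpr));
        return p;
    }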

View File

@@ -63,18 +63,18 @@ void arm_translate_init(void)
     int i;
 
     for (i = 0; i < 16; i++) {
-        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+        cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
                                           offsetof(CPUARMState, regs[i]),
                                           regnames[i]);
     }
-    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
-    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
-    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
-    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
+    cpu_CF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, CF), "CF");
+    cpu_NF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, NF), "NF");
+    cpu_VF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, VF), "VF");
+    cpu_ZF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, ZF), "ZF");
 
-    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
+    cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_env,
         offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
-    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
+    cpu_exclusive_val = tcg_global_mem_new_i64(tcg_env,
         offsetof(CPUARMState, exclusive_val), "exclusive_val");
 
     a64_translate_init();
@@ -179,10 +179,10 @@ void store_cpu_offset(TCGv_i32 var, int offset, int size)
 {
     switch (size) {
     case 1:
-        tcg_gen_st8_i32(var, cpu_env, offset);
+        tcg_gen_st8_i32(var, tcg_env, offset);
         break;
     case 4:
-        tcg_gen_st_i32(var, cpu_env, offset);
+        tcg_gen_st_i32(var, tcg_env, offset);
         break;
     default:
         g_assert_not_reached();
@@ -329,7 +329,7 @@ static void store_sp_checked(DisasContext *s, TCGv_i32 var)
 {
 #ifndef CONFIG_USER_ONLY
     if (s->v8m_stackcheck) {
-        gen_helper_v8m_stackcheck(cpu_env, var);
+        gen_helper_v8m_stackcheck(tcg_env, var);
     }
 #endif
     store_reg(s, 13, var);
@@ -346,7 +346,7 @@ static void store_sp_checked(DisasContext *s, TCGv_i32 var)
 
 void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
 {
-    gen_helper_cpsr_write(cpu_env, var, tcg_constant_i32(mask));
+    gen_helper_cpsr_write(tcg_env, var, tcg_constant_i32(mask));
 }
 
 static void gen_rebuild_hflags(DisasContext *s, bool new_el)
@@ -355,16 +355,16 @@ static void gen_rebuild_hflags(DisasContext *s, bool new_el)
 
     if (new_el) {
         if (m_profile) {
-            gen_helper_rebuild_hflags_m32_newel(cpu_env);
+            gen_helper_rebuild_hflags_m32_newel(tcg_env);
         } else {
-            gen_helper_rebuild_hflags_a32_newel(cpu_env);
+            gen_helper_rebuild_hflags_a32_newel(tcg_env);
         }
     } else {
         TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
         if (m_profile) {
-            gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
+            gen_helper_rebuild_hflags_m32(tcg_env, tcg_el);
         } else {
-            gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
+            gen_helper_rebuild_hflags_a32(tcg_env, tcg_el);
         }
     }
 }
@@ -372,7 +372,7 @@ static void gen_rebuild_hflags(DisasContext *s, bool new_el)
 static void gen_exception_internal(int excp)
 {
     assert(excp_is_internal(excp));
-    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
+    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
 }
 
 static void gen_singlestep_exception(DisasContext *s)
@@ -617,10 +617,10 @@ static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
 {
     if (flags) {
         switch (shiftop) {
-        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
-        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
-        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
-        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
+        case 0: gen_helper_shl_cc(var, tcg_env, var, shift); break;
+        case 1: gen_helper_shr_cc(var, tcg_env, var, shift); break;
+        case 2: gen_helper_sar_cc(var, tcg_env, var, shift); break;
+        case 3: gen_helper_ror_cc(var, tcg_env, var, shift); break;
         }
     } else {
         switch (shiftop) {
@@ -849,7 +849,7 @@ static inline void gen_bxns(DisasContext *s, int rm)
      * is correct in the non-UNPREDICTABLE cases, and we can choose
     * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
      */
-    gen_helper_v7m_bxns(cpu_env, var);
+    gen_helper_v7m_bxns(tcg_env, var);
     s->base.is_jmp = DISAS_EXIT;
 }
 
@@ -862,7 +862,7 @@ static inline void gen_blxns(DisasContext *s, int rm)
      * The blxns helper may throw an exception.
      */
     gen_update_pc(s, curr_insn_len(s));
-    gen_helper_v7m_blxns(cpu_env, var);
+    gen_helper_v7m_blxns(tcg_env, var);
     s->base.is_jmp = DISAS_EXIT;
 }
 
@@ -1024,7 +1024,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
      * the insn really executes).
      */
    gen_update_pc(s, 0);
-    gen_helper_pre_hvc(cpu_env);
+    gen_helper_pre_hvc(tcg_env);
     /* Otherwise we will treat this as a real exception which
      * happens after execution of the insn. (The distinction matters
      * for the PC value reported to the exception handler and also
@@ -1041,7 +1041,7 @@ static inline void gen_smc(DisasContext *s)
      * the insn executes.
      */
     gen_update_pc(s, 0);
-    gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa32_smc()));
+    gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa32_smc()));
     gen_update_pc(s, curr_insn_len(s));
     s->base.is_jmp = DISAS_SMC;
 }
@@ -1056,7 +1056,7 @@ static void gen_exception_internal_insn(DisasContext *s, int excp)
 
 static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el)
 {
-    gen_helper_exception_with_syndrome_el(cpu_env, tcg_constant_i32(excp),
+    gen_helper_exception_with_syndrome_el(tcg_env, tcg_constant_i32(excp),
                                           tcg_constant_i32(syndrome), tcg_el);
 }
 
@@ -1067,7 +1067,7 @@ static void gen_exception_el(int excp, uint32_t syndrome, uint32_t target_el)
 
 static void gen_exception(int excp, uint32_t syndrome)
 {
-    gen_helper_exception_with_syndrome(cpu_env, tcg_constant_i32(excp),
+    gen_helper_exception_with_syndrome(tcg_env, tcg_constant_i32(excp),
                                        tcg_constant_i32(syndrome));
 }
 
@@ -1108,7 +1108,7 @@ static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
 {
     gen_set_condexec(s);
     gen_update_pc(s, 0);
-    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syn));
+    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syn));
     s->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -1192,20 +1192,20 @@ void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
 
     switch (memop) {
     case MO_SB:
-        tcg_gen_ld8s_i32(dest, cpu_env, off);
+        tcg_gen_ld8s_i32(dest, tcg_env, off);
         break;
     case MO_UB:
-        tcg_gen_ld8u_i32(dest, cpu_env, off);
+        tcg_gen_ld8u_i32(dest, tcg_env, off);
         break;
     case MO_SW:
-        tcg_gen_ld16s_i32(dest, cpu_env, off);
+        tcg_gen_ld16s_i32(dest, tcg_env, off);
         break;
     case MO_UW:
-        tcg_gen_ld16u_i32(dest, cpu_env, off);
+        tcg_gen_ld16u_i32(dest, tcg_env, off);
         break;
     case MO_UL:
     case MO_SL:
-        tcg_gen_ld_i32(dest, cpu_env, off);
+        tcg_gen_ld_i32(dest, tcg_env, off);
         break;
     default:
         g_assert_not_reached();
@@ -1218,13 +1218,13 @@ void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
 
     switch (memop) {
     case MO_SL:
-        tcg_gen_ld32s_i64(dest, cpu_env, off);
+        tcg_gen_ld32s_i64(dest, tcg_env, off);
         break;
     case MO_UL:
-        tcg_gen_ld32u_i64(dest, cpu_env, off);
+        tcg_gen_ld32u_i64(dest, tcg_env, off);
         break;
     case MO_UQ:
-        tcg_gen_ld_i64(dest, cpu_env, off);
+        tcg_gen_ld_i64(dest, tcg_env, off);
         break;
     default:
         g_assert_not_reached();
@@ -1237,13 +1237,13 @@ void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
 
     switch (memop) {
     case MO_8:
-        tcg_gen_st8_i32(src, cpu_env, off);
+        tcg_gen_st8_i32(src, tcg_env, off);
         break;
     case MO_16:
-        tcg_gen_st16_i32(src, cpu_env, off);
+        tcg_gen_st16_i32(src, tcg_env, off);
         break;
     case MO_32:
-        tcg_gen_st_i32(src, cpu_env, off);
+        tcg_gen_st_i32(src, tcg_env, off);
         break;
     default:
         g_assert_not_reached();
@@ -1256,10 +1256,10 @@ void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
 
     switch (memop) {
     case MO_32:
-        tcg_gen_st32_i64(src, cpu_env, off);
+        tcg_gen_st32_i64(src, tcg_env, off);
         break;
     case MO_64:
-        tcg_gen_st_i64(src, cpu_env, off);
+        tcg_gen_st_i64(src, tcg_env, off);
         break;
     default:
         g_assert_not_reached();
@@ -1270,24 +1270,24 @@ void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
 
 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
 {
-    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+    tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
 }
 
 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
 {
-    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+    tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
 }
 
 static inline TCGv_i32 iwmmxt_load_creg(int reg)
 {
     TCGv_i32 var = tcg_temp_new_i32();
-    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+    tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
     return var;
 }
 
 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
 {
-    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+    tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
 }
 
 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
@@ -1329,7 +1329,7 @@ static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
 { \
     iwmmxt_load_reg(cpu_V1, rn); \
-    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+    gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \
 }
 
 #define IWMMXT_OP_ENV_SIZE(name) \
@@ -1340,7 +1340,7 @@ IWMMXT_OP_ENV(name##l)
 #define IWMMXT_OP_ENV1(name) \
 static inline void gen_op_iwmmxt_##name##_M0(void) \
 { \
-    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
+    gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \
 }
 
 IWMMXT_OP(maddsq)
@@ -2113,13 +2113,13 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         }
         switch ((insn >> 22) & 3) {
         case 1:
-            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp);
            break;
         case 2:
-            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp);
            break;
         case 3:
-            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp);
             break;
         }
         gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2139,13 +2139,13 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         }
         switch ((insn >> 22) & 3) {
        case 1:
-            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp);
            break;
         case 2:
-            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp);
            break;
         case 3:
-            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp);
             break;
         }
         gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2165,13 +2165,13 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         }
         switch ((insn >> 22) & 3) {
        case 1:
-            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp);
            break;
         case 2:
-            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp);
            break;
         case 3:
-            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp);
             break;
         }
         gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2191,19 +2191,19 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
             if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                 return 1;
             }
-            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp);
             break;
         case 2:
             if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                 return 1;
             }
-            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp);
             break;
         case 3:
             if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                 return 1;
             }
-            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
+            gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp);
             break;
         }
         gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2335,7 +2335,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         rd0 = (insn >> 16) & 0xf;
         gen_op_iwmmxt_movq_M0_wRn(rd0);
         tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
-        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
+        gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         gen_op_iwmmxt_set_cup();
@@ -2857,7 +2857,7 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
     gen_set_condexec(s);
     gen_update_pc(s, 0);
     tcg_reg = load_reg(s, rn);
-    gen_helper_msr_banked(cpu_env, tcg_reg,
+    gen_helper_msr_banked(tcg_env, tcg_reg,
                           tcg_constant_i32(tgtmode),
                           tcg_constant_i32(regno));
     s->base.is_jmp = DISAS_UPDATE_EXIT;
@@ -2876,7 +2876,7 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
     gen_set_condexec(s);
     gen_update_pc(s, 0);
     tcg_reg = tcg_temp_new_i32();
-    gen_helper_mrs_banked(tcg_reg, cpu_env,
+    gen_helper_mrs_banked(tcg_reg, tcg_env,
                           tcg_constant_i32(tgtmode),
                           tcg_constant_i32(regno));
     store_reg(s, rn, tcg_reg);
@@ -2901,7 +2901,7 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
     * be called after storing the new PC.
      */
     translator_io_start(&s->base);
-    gen_helper_cpsr_write_eret(cpu_env, cpsr);
+    gen_helper_cpsr_write_eret(tcg_env, cpsr);
     /* Must exit loop to check un-masked IRQs */
     s->base.is_jmp = DISAS_EXIT;
 }
@@ -2918,7 +2918,7 @@ static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
 {
     TCGv_ptr qc_ptr = tcg_temp_new_ptr();
 
-    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
+    tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
     tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
                        opr_sz, max_sz, 0, fn);
 }
@@ -4605,11 +4605,11 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
         case 0:
             if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
                 && dc_isar_feature(aa64_tidcp1, s)) {
-                gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
+                gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
             }
            break;
         case 1:
-            gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
+            gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
             break;
         }
     }
@@ -4654,7 +4654,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
         gen_set_condexec(s);
         gen_update_pc(s, 0);
         tcg_ri = tcg_temp_new_ptr();
-        gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
+        gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
                                        tcg_constant_i32(key),
                                        tcg_constant_i32(syndrome),
                                        tcg_constant_i32(isread));
@@ -4702,10 +4702,10 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
                     tcg_ri = gen_lookup_cp_reg(key);
                 }
                 tmp64 = tcg_temp_new_i64();
-                gen_helper_get_cp_reg64(tmp64, cpu_env, tcg_ri);
+                gen_helper_get_cp_reg64(tmp64, tcg_env, tcg_ri);
             } else {
                 tmp64 = tcg_temp_new_i64();
-                tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
+                tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset);
             }
             tmp = tcg_temp_new_i32();
             tcg_gen_extrl_i64_i32(tmp, tmp64);
@@ -4722,7 +4722,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
                     tcg_ri = gen_lookup_cp_reg(key);
                 }
                 tmp = tcg_temp_new_i32();
-                gen_helper_get_cp_reg(tmp, cpu_env, tcg_ri);
+                gen_helper_get_cp_reg(tmp, tcg_env, tcg_ri);
             } else {
                 tmp = load_cpu_offset(ri->fieldoffset);
             }
@@ -4752,9 +4752,9 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
                 if (!tcg_ri) {
                     tcg_ri = gen_lookup_cp_reg(key);
                 }
-                gen_helper_set_cp_reg64(cpu_env, tcg_ri, tmp64);
+                gen_helper_set_cp_reg64(tcg_env, tcg_ri, tmp64);
             } else {
-                tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
+                tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset);
             }
         } else {
             TCGv_i32 tmp = load_reg(s, rt);
@@ -4762,7 +4762,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
                 if (!tcg_ri) {
                     tcg_ri = gen_lookup_cp_reg(key);
                 }
-                gen_helper_set_cp_reg(cpu_env, tcg_ri, tmp);
+                gen_helper_set_cp_reg(tcg_env, tcg_ri, tmp);
             } else {
                 store_cpu_offset(tmp, ri->fieldoffset, 4);
             }
@@ -5028,7 +5028,7 @@ static void gen_srs(DisasContext *s,
     /* get_r13_banked() will raise an exception if called from System mode */
     gen_set_condexec(s);
     gen_update_pc(s, 0);
-    gen_helper_get_r13_banked(addr, cpu_env, tcg_constant_i32(mode));
+    gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode));
     switch (amode) {
     case 0: /* DA */
         offset = -4;
@@ -5069,7 +5069,7 @@ static void gen_srs(DisasContext *s,
            g_assert_not_reached();
         }
         tcg_gen_addi_i32(addr, addr, offset);
-        gen_helper_set_r13_banked(cpu_env, tcg_constant_i32(mode), addr);
+        gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr);
     }
     s->base.is_jmp = DISAS_UPDATE_EXIT;
 }
@@ -5618,7 +5618,7 @@ static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
 
 static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
 {
-    gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+    gen_helper_mve_sqshll(r, tcg_env, n, tcg_constant_i32(shift));
 }
 
 static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
@@ -5628,7 +5628,7 @@ static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
 
 static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
 {
-    gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+    gen_helper_mve_uqshll(r, tcg_env, n, tcg_constant_i32(shift));
 }
 
 static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
@@ -5674,7 +5674,7 @@ static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
     tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
 
     /* The helper takes care of the sign-extension of the low 8 bits of Rm */
-    fn(rda, cpu_env, rda, cpu_R[a->rm]);
+    fn(rda, tcg_env, rda, cpu_R[a->rm]);
 
     tcg_gen_extrl_i64_i32(rdalo, rda);
     tcg_gen_extrh_i64_i32(rdahi, rda);
@@ -5748,7 +5748,7 @@ static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
 
 static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
 {
-    gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+    gen_helper_mve_sqshl(r, tcg_env, n, tcg_constant_i32(shift));
 }
 
 static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
@@ -5758,7 +5758,7 @@ static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
 
 static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
 {
-    gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+    gen_helper_mve_uqshl(r, tcg_env, n, tcg_constant_i32(shift));
 }
 
 static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
@@ -5782,7 +5782,7 @@ static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
     }
 
     /* The helper takes care of the sign-extension of the low 8 bits of Rm */
-    fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+    fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]);
     return true;
 }
 
@@ -5928,12 +5928,12 @@ static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
     t0 = load_reg(s, a->rm);
     t1 = load_reg(s, a->rn);
     if (doub) {
-        gen_helper_add_saturate(t1, cpu_env, t1, t1);
+        gen_helper_add_saturate(t1, tcg_env, t1, t1);
     }
     if (add) {
-        gen_helper_add_saturate(t0, cpu_env, t0, t1);
+        gen_helper_add_saturate(t0, tcg_env, t0, t1);
     } else {
-        gen_helper_sub_saturate(t0, cpu_env, t0, t1);
+        gen_helper_sub_saturate(t0, tcg_env, t0, t1);
     }
     store_reg(s, a->rd, t0);
     return true;
@@ -5977,7 +5977,7 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
         break;
     case 1:
         t1 = load_reg(s, a->ra);
-        gen_helper_add_setq(t0, cpu_env, t0, t1);
+        gen_helper_add_setq(t0, tcg_env, t0, t1);
         store_reg(s, a->rd, t0);
         break;
     case 2:
@@ -6041,7 +6041,7 @@ static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
     tcg_gen_muls2_i32(t0, t1, t0, t1);
     if (add) {
         t0 = load_reg(s, a->ra);
-        gen_helper_add_setq(t1, cpu_env, t1, t0);
+        gen_helper_add_setq(t1, tcg_env, t1, t0);
     }
     store_reg(s, a->rd, t1);
     return true;
@@ -6120,7 +6120,7 @@ static bool trans_ESB(DisasContext *s, arg_ESB *a)
          * Test for EL2 present, and defer test for SEL2 to runtime.
          */
         if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
-            gen_helper_vesb(cpu_env);
+            gen_helper_vesb(tcg_env);
         }
     }
     return true;
@@ -6228,7 +6228,7 @@ static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
         tmp = load_cpu_field(spsr);
     } else {
         tmp = tcg_temp_new_i32();
-        gen_helper_cpsr_read(tmp, cpu_env);
+        gen_helper_cpsr_read(tmp, tcg_env);
     }
     store_reg(s, a->rd, tmp);
     return true;
@@ -6257,7 +6257,7 @@ static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
        return false;
     }
     tmp = tcg_temp_new_i32();
-    gen_helper_v7m_mrs(tmp, cpu_env, tcg_constant_i32(a->sysm));
+    gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm));
     store_reg(s, a->rd, tmp);
     return true;
 }
@@ -6271,7 +6271,7 @@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
     }
     addr = tcg_constant_i32((a->mask << 10) | a->sysm);
     reg = load_reg(s, a->rn);
-    gen_helper_v7m_msr(cpu_env, addr, reg);
+    gen_helper_v7m_msr(tcg_env, addr, reg);
     /* If we wrote to CONTROL, the EL might have changed */
     gen_rebuild_hflags(s, true);
     gen_lookup_tb(s);
@@ -6302,7 +6302,7 @@ static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
     if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
         arm_dc_feature(s, ARM_FEATURE_EL2) &&
         s->current_el < 2 && s->ns) {
-        gen_helper_check_bxj_trap(cpu_env, tcg_constant_i32(a->rm));
+        gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm));
     }
     /* Trivial implementation equivalent to bx. */
     gen_bx(s, load_reg(s, a->rm));
@@ -6480,7 +6480,7 @@ static bool trans_TT(DisasContext *s, arg_TT *a)
     addr = load_reg(s, a->rn);
 
     tmp = tcg_temp_new_i32();
-    gen_helper_v7m_tt(tmp, cpu_env, addr, tcg_constant_i32((a->A << 1) | a->T));
+    gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T));
     store_reg(s, a->rd, tmp);
     return true;
 }
@@ -6510,7 +6510,7 @@ static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
     TCGv_i32 addr = load_reg(s, a->rn);
 
     if (s->v8m_stackcheck && a->rn == 13 && a->w) {
-        gen_helper_v8m_stackcheck(cpu_env, addr);
+        gen_helper_v8m_stackcheck(tcg_env, addr);
     }
 
     if (a->p) {
@@ -6665,9 +6665,9 @@ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
         if (!a->u) {
             TCGv_i32 newsp = tcg_temp_new_i32();
             tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
-            gen_helper_v8m_stackcheck(cpu_env, newsp);
+            gen_helper_v8m_stackcheck(tcg_env, newsp);
         } else {
-            gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
+            gen_helper_v8m_stackcheck(tcg_env, cpu_R[13]);
         }
     }
 
@@ -7319,7 +7319,7 @@ static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
     t1 = load_reg(s, a->rm);
 
     ge = tcg_temp_new_ptr();
-    tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
+    tcg_gen_addi_ptr(ge, tcg_env, offsetof(CPUARMState, GE));
     gen(t0, t0, t1, ge);
 
     store_reg(s, a->rd, t0);
@@ -7433,7 +7433,7 @@ static bool op_sat(DisasContext *s, arg_sat *a,
         tcg_gen_shli_i32(tmp, tmp, shift);
     }
 
-    gen(tmp, cpu_env, tmp, tcg_constant_i32(a->satimm));
+    gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm));
 
     store_reg(s, a->rd, tmp);
     return true;
@@ -7540,7 +7540,7 @@ static bool trans_SEL(DisasContext *s, arg_rrr *a)
     t1 = load_reg(s, a->rn);
     t2 = load_reg(s, a->rm);
     t3 = tcg_temp_new_i32();
-    tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
+    tcg_gen_ld_i32(t3, tcg_env, offsetof(CPUARMState, GE));
     gen_helper_sel_flags(t1, t3, t1, t2);
     store_reg(s, a->rd, t1);
     return true;
@@ -7618,11 +7618,11 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
 
         if (a->ra != 15) {
             t2 = load_reg(s, a->ra);
-            gen_helper_add_setq(t1, cpu_env, t1, t2);
+            gen_helper_add_setq(t1, tcg_env, t1, t2);
         }
     } else if (a->ra == 15) {
         /* Single saturation-checking addition */
-        gen_helper_add_setq(t1, cpu_env, t1, t2);
+        gen_helper_add_setq(t1, tcg_env, t1, t2);
     } else {
         /*
         * We need to add the products and Ra together and then
@@ -7804,9 +7804,9 @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u)
     t1 = load_reg(s, a->rn);
     t2 = load_reg(s, a->rm);
     if (u) {
-        gen_helper_udiv(t1, cpu_env, t1, t2);
+        gen_helper_udiv(t1, tcg_env, t1, t2);
     } else {
-        gen_helper_sdiv(t1, cpu_env, t1, t2);
+        gen_helper_sdiv(t1, tcg_env, t1, t2);
     }
     store_reg(s, a->rd, t1);
     return true;
@@ -7855,7 +7855,7 @@ static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
         * either the original SP (if incrementing) or our
         * final SP (if decrementing), so that's what we check.
          */
-        gen_helper_v8m_stackcheck(cpu_env, addr);
+        gen_helper_v8m_stackcheck(tcg_env, addr);
     }
 
     return addr;
@@ -7916,7 +7916,7 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
 
         if (user && i != 15) {
             tmp = tcg_temp_new_i32();
-            gen_helper_get_user_reg(tmp, cpu_env, tcg_constant_i32(i));
+            gen_helper_get_user_reg(tmp, tcg_env, tcg_constant_i32(i));
         } else {
            tmp = load_reg(s, i);
        }
@@ -7999,7 +7999,7 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
         tmp = tcg_temp_new_i32();
         gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
         if (user) {
-            gen_helper_set_user_reg(cpu_env, tcg_constant_i32(i), tmp);
+            gen_helper_set_user_reg(tcg_env, tcg_constant_i32(i), tmp);
         } else if (i == a->rn) {
             loaded_var = tmp;
             loaded_base = true;
@@ -8026,7 +8026,7 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
         /* Restore CPSR from SPSR. */
         tmp = load_cpu_field(spsr);
         translator_io_start(&s->base);
-        gen_helper_cpsr_write_eret(cpu_env, tmp);
+        gen_helper_cpsr_write_eret(tcg_env, tmp);
         /* Must exit loop to check un-masked IRQs */
         s->base.is_jmp = DISAS_EXIT;
     }
@@ -8100,7 +8100,7 @@ static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
         /*
         * Clear APSR (by calling the MSR helper with the same argument
        * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
         */
-        gen_helper_v7m_msr(cpu_env, tcg_constant_i32(0xc00), zero);
+        gen_helper_v7m_msr(tcg_env, tcg_constant_i32(0xc00), zero);
     }
     clear_eci_state(s);
     return true;
@@ -8487,7 +8487,7 @@ static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
     tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
                         masklen, tcg_constant_i32(1 << (4 - a->size)),
                        rn_shifted, tcg_constant_i32(16));
-    gen_helper_mve_vctp(cpu_env, masklen);
+    gen_helper_mve_vctp(tcg_env, masklen);
     /* This insn updates predication bits */
     s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
    mve_update_eci(s);
@@ -8665,12 +8665,12 @@ static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
     /* FAULTMASK */
     if (a->F) {
         addr = tcg_constant_i32(19);
-        gen_helper_v7m_msr(cpu_env, addr, tmp);
+        gen_helper_v7m_msr(tcg_env, addr, tmp);
     }
     /* PRIMASK */
     if (a->I) {
         addr = tcg_constant_i32(16);
-        gen_helper_v7m_msr(cpu_env, addr, tmp);
+        gen_helper_v7m_msr(tcg_env, addr, tmp);
     }
     gen_rebuild_hflags(s, false);
     gen_lookup_tb(s);
@@ -8740,7 +8740,7 @@ static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
        return false;
     }
     if (a->E != (s->be_data == MO_BE)) {
-        gen_helper_setend(cpu_env);
+        gen_helper_setend(tcg_env);
        s->base.is_jmp = DISAS_UPDATE_EXIT;
     }
     return true;
@@ -9089,7 +9089,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUARMState *env = cs->env_ptr;
+    CPUARMState *env = cpu_env(cs);
     ARMCPU *cpu = env_archcpu(env);
     CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
     uint32_t condexec, core_mmu_idx;
@@ -9317,7 +9317,7 @@ static void arm_post_translate_insn(DisasContext *dc)
 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUARMState *env = cpu->env_ptr;
+    CPUARMState *env = cpu_env(cpu);
     uint32_t pc = dc->base.pc_next;
     unsigned int insn;
 
@@ -9335,7 +9335,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         * be possible after an indirect branch, at the start of the TB.
          */
        assert(dc->base.num_insns == 1);
-        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
        dc->base.is_jmp = DISAS_NORETURN;
        dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
@@ -9407,7 +9407,7 @@ static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUARMState *env = cpu->env_ptr;
+    CPUARMState *env = cpu_env(cpu);
     uint32_t pc = dc->base.pc_next;
     uint32_t insn;
     bool is_16bit;
@@ -9615,7 +9615,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         /* nothing more to generate */
        break;
     case DISAS_WFI:
-        gen_helper_wfi(cpu_env, tcg_constant_i32(curr_insn_len(dc)));
+        gen_helper_wfi(tcg_env, tcg_constant_i32(curr_insn_len(dc)));
         /*
         * The helper doesn't necessarily throw an exception, but we
        * must go back to the main loop to check for interrupts anyway.
@@ -9623,10 +9623,10 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
        tcg_gen_exit_tb(NULL, 0);
        break;
     case DISAS_WFE:
-        gen_helper_wfe(cpu_env);
+        gen_helper_wfe(tcg_env);
        break;
     case DISAS_YIELD:
-        gen_helper_yield(cpu_env);
+        gen_helper_yield(tcg_env);
        break;
     case DISAS_SWI:
         gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
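Besides the rename, the last hunks in this file drop the cached cs->env_ptr back-pointer in favour of a cpu_env() accessor. With CPUNegativeOffsetState moved into CPUState, the architectural env now sits at a fixed offset from the CPUState, so the pointer can be computed instead of stored. A plausible shape of the accessor, assuming that layout (the real definition lives in the common cpu headers):

    /* env immediately follows the CPUState embedded at the start of
     * ArchCPU, so stepping over one CPUState lands on CPUArchState. */
    static inline CPUArchState *cpu_env(CPUState *cpu)
    {
        return (CPUArchState *)(cpu + 1);
    }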

View File

@@ -329,7 +329,7 @@ static inline TCGv_i32 get_ahp_flag(void)
 {
     TCGv_i32 ret = tcg_temp_new_i32();
 
-    tcg_gen_ld_i32(ret, cpu_env,
+    tcg_gen_ld_i32(ret, tcg_env,
                    offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
     tcg_gen_extract_i32(ret, ret, 26, 1);
@@ -343,9 +343,9 @@ static inline void set_pstate_bits(uint32_t bits)
 
     tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
 
-    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
     tcg_gen_ori_i32(p, p, bits);
-    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
 }
 
 /* Clear bits within PSTATE. */
@@ -355,9 +355,9 @@ static inline void clear_pstate_bits(uint32_t bits)
 
     tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
 
-    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
     tcg_gen_andi_i32(p, p, ~bits);
-    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
 }
 
 /* If the singlestep state is Active-not-pending, advance to Active-pending. */
@@ -374,7 +374,7 @@ static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
 {
     /* Fill in the same_el field of the syndrome in the helper. */
     uint32_t syn = syn_swstep(false, isv, ex);
-    gen_helper_exception_swstep(cpu_env, tcg_constant_i32(syn));
+    gen_helper_exception_swstep(tcg_env, tcg_constant_i32(syn));
 }
 
 /*
@@ -557,7 +557,7 @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
     default:
         g_assert_not_reached();
     }
-    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+    tcg_gen_addi_ptr(statusptr, tcg_env, offset);
     return statusptr;
 }
@@ -679,7 +679,7 @@ static inline void set_disas_label(DisasContext *s, DisasLabel l)
 static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
-    gen_helper_lookup_cp_reg(ret, cpu_env, tcg_constant_i32(key));
+    gen_helper_lookup_cp_reg(ret, tcg_env, tcg_constant_i32(key));
     return ret;
 }

View File

@@ -147,8 +147,6 @@ static void avr_cpu_initfn(Object *obj)
 {
     AVRCPU *cpu = AVR_CPU(obj);
 
-    cpu_set_cpustate_pointers(cpu);
-
     /* Set the number of interrupts supported by the CPU. */
     qdev_init_gpio_in(DEVICE(cpu), avr_cpu_set_int,
                       sizeof(cpu->env.intsrc) * 8);
@@ -390,6 +388,7 @@ static const TypeInfo avr_cpu_type_info[] = {
         .name = TYPE_AVR_CPU,
         .parent = TYPE_CPU,
         .instance_size = sizeof(AVRCPU),
+        .instance_align = __alignof(AVRCPU),
         .instance_init = avr_cpu_initfn,
         .class_size = sizeof(AVRCPUClass),
         .class_init = avr_cpu_class_init,

View File

@@ -148,7 +148,6 @@ struct ArchCPU {
     CPUState parent_obj;
     /*< public >*/
 
-    CPUNegativeOffsetState neg;
     CPUAVRState env;
 };
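The AVR changes come as a matched set: avr_cpu_initfn() no longer calls cpu_set_cpustate_pointers() because there is no env_ptr left to fix up, the explicit CPUNegativeOffsetState member disappears from ArchCPU now that CPUState embeds that state itself, and instance_align tells QOM to allocate the object with the alignment the struct requires (the CRIS hunks below repeat the same pattern). A sketch of the layout these changes assume, reusing the AVR names from the hunks above:

    /* ArchCPU reduces to a CPUState (which now carries the former
     * "negative offset" TLB/icount state) followed directly by the
     * target env, so cpu_env()/env_cpu() are plain pointer arithmetic. */
    struct ArchCPU {
        CPUState parent_obj;
        CPUAVRState env;
    };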

View File

@@ -127,25 +127,25 @@ void avr_cpu_tcg_init(void)
     int i;
 
 #define AVR_REG_OFFS(x) offsetof(CPUAVRState, x)
-    cpu_pc = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(pc_w), "pc");
-    cpu_Cf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregC), "Cf");
-    cpu_Zf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregZ), "Zf");
-    cpu_Nf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregN), "Nf");
-    cpu_Vf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregV), "Vf");
-    cpu_Sf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregS), "Sf");
-    cpu_Hf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregH), "Hf");
-    cpu_Tf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregT), "Tf");
-    cpu_If = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregI), "If");
-    cpu_rampD = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampD), "rampD");
-    cpu_rampX = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampX), "rampX");
-    cpu_rampY = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampY), "rampY");
-    cpu_rampZ = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampZ), "rampZ");
-    cpu_eind = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(eind), "eind");
-    cpu_sp = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sp), "sp");
-    cpu_skip = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(skip), "skip");
+    cpu_pc = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(pc_w), "pc");
+    cpu_Cf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregC), "Cf");
+    cpu_Zf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregZ), "Zf");
+    cpu_Nf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregN), "Nf");
+    cpu_Vf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregV), "Vf");
+    cpu_Sf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregS), "Sf");
+    cpu_Hf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregH), "Hf");
+    cpu_Tf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregT), "Tf");
+    cpu_If = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregI), "If");
+    cpu_rampD = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampD), "rampD");
+    cpu_rampX = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampX), "rampX");
+    cpu_rampY = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampY), "rampY");
+    cpu_rampZ = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampZ), "rampZ");
+    cpu_eind = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(eind), "eind");
+    cpu_sp = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sp), "sp");
+    cpu_skip = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(skip), "skip");
 
     for (i = 0; i < NUMBER_OF_CPU_REGISTERS; i++) {
-        cpu_r[i] = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(r[i]),
+        cpu_r[i] = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(r[i]),
                                           reg_names[i]);
     }
 #undef AVR_REG_OFFS
@@ -184,7 +184,7 @@ static int append_16(DisasContext *ctx, int x)
 static bool avr_have_feature(DisasContext *ctx, int feature)
 {
     if (!avr_feature(ctx->env, feature)) {
-        gen_helper_unsupported(cpu_env);
+        gen_helper_unsupported(tcg_env);
         ctx->base.is_jmp = DISAS_NORETURN;
         return false;
     }
@@ -1295,7 +1295,7 @@ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
     TCGv data = tcg_temp_new_i32();
     TCGv port = tcg_constant_i32(a->reg);
 
-    gen_helper_inb(data, cpu_env, port);
+    gen_helper_inb(data, tcg_env, port);
     tcg_gen_andi_tl(data, data, 1 << a->bit);
     ctx->skip_cond = TCG_COND_EQ;
     ctx->skip_var0 = data;
@@ -1313,7 +1313,7 @@ static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
     TCGv data = tcg_temp_new_i32();
     TCGv port = tcg_constant_i32(a->reg);
 
-    gen_helper_inb(data, cpu_env, port);
+    gen_helper_inb(data, tcg_env, port);
     tcg_gen_andi_tl(data, data, 1 << a->bit);
     ctx->skip_cond = TCG_COND_NE;
     ctx->skip_var0 = data;
@@ -1494,7 +1494,7 @@ static TCGv gen_get_zaddr(void)
 static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
 {
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
-        gen_helper_fullwr(cpu_env, data, addr);
+        gen_helper_fullwr(tcg_env, data, addr);
     } else {
         tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
@@ -1503,7 +1503,7 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
 static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
 {
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
-        gen_helper_fullrd(data, cpu_env, addr);
+        gen_helper_fullrd(data, tcg_env, addr);
     } else {
         tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
@@ -2130,7 +2130,7 @@ static bool trans_IN(DisasContext *ctx, arg_IN *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv port = tcg_constant_i32(a->imm);
 
-    gen_helper_inb(Rd, cpu_env, port);
+    gen_helper_inb(Rd, tcg_env, port);
     return true;
 }
@@ -2143,7 +2143,7 @@ static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv port = tcg_constant_i32(a->imm);
 
-    gen_helper_outb(cpu_env, port, Rd);
+    gen_helper_outb(tcg_env, port, Rd);
     return true;
 }
@@ -2411,9 +2411,9 @@ static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
     TCGv data = tcg_temp_new_i32();
     TCGv port = tcg_constant_i32(a->reg);
 
-    gen_helper_inb(data, cpu_env, port);
+    gen_helper_inb(data, tcg_env, port);
     tcg_gen_ori_tl(data, data, 1 << a->bit);
-    gen_helper_outb(cpu_env, port, data);
+    gen_helper_outb(tcg_env, port, data);
     return true;
 }
@@ -2426,9 +2426,9 @@ static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
     TCGv data = tcg_temp_new_i32();
     TCGv port = tcg_constant_i32(a->reg);
 
-    gen_helper_inb(data, cpu_env, port);
+    gen_helper_inb(data, tcg_env, port);
     tcg_gen_andi_tl(data, data, ~(1 << a->bit));
-    gen_helper_outb(cpu_env, port, data);
+    gen_helper_outb(tcg_env, port, data);
     return true;
 }
@@ -2551,7 +2551,7 @@ static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
 
 #ifdef BREAKPOINT_ON_BREAK
     tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
-    gen_helper_debug(cpu_env);
+    gen_helper_debug(tcg_env);
     ctx->base.is_jmp = DISAS_EXIT;
 #else
     /* NOP */
@@ -2577,7 +2577,7 @@ static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
  */
 static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
 {
-    gen_helper_sleep(cpu_env);
+    gen_helper_sleep(tcg_env);
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
@@ -2589,7 +2589,7 @@ static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
  */
 static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
 {
-    gen_helper_wdr(cpu_env);
+    gen_helper_wdr(tcg_env);
     return true;
 }
@@ -2608,7 +2608,7 @@ static void translate(DisasContext *ctx)
     uint32_t opcode = next_word(ctx);
 
     if (!decode_insn(ctx, opcode)) {
-        gen_helper_unsupported(cpu_env);
+        gen_helper_unsupported(tcg_env);
         ctx->base.is_jmp = DISAS_NORETURN;
     }
 }
@@ -2657,7 +2657,7 @@ static bool canonicalize_skip(DisasContext *ctx)
 static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
-    CPUAVRState *env = cs->env_ptr;
+    CPUAVRState *env = cpu_env(cs);
     uint32_t tb_flags = ctx->base.tb->flags;
 
     ctx->cs = cs;

target/cris/cpu.c

@@ -201,8 +201,6 @@ static void cris_cpu_initfn(Object *obj)
     CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
     CPUCRISState *env = &cpu->env;

-    cpu_set_cpustate_pointers(cpu);
-
     env->pregs[PR_VR] = ccc->vr;

 #ifndef CONFIG_USER_ONLY
@@ -345,6 +343,7 @@ static const TypeInfo cris_cpu_model_type_infos[] = {
         .name = TYPE_CRIS_CPU,
         .parent = TYPE_CPU,
         .instance_size = sizeof(CRISCPU),
+        .instance_align = __alignof(CRISCPU),
         .instance_init = cris_cpu_initfn,
         .abstract = true,
         .class_size = sizeof(CRISCPUClass),

target/cris/cpu.h

@@ -178,7 +178,6 @@ struct ArchCPU {
     /*< private >*/
     CPUState parent_obj;
     /*< public >*/

-    CPUNegativeOffsetState neg;
     CPUCRISState env;
 };

target/cris/translate.c

@@ -171,9 +171,9 @@ static const int preg_sizes[] = {
 };

 #define t_gen_mov_TN_env(tn, member) \
-    tcg_gen_ld_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+    tcg_gen_ld_tl(tn, tcg_env, offsetof(CPUCRISState, member))
 #define t_gen_mov_env_TN(member, tn) \
-    tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+    tcg_gen_st_tl(tn, tcg_env, offsetof(CPUCRISState, member))
 #define t_gen_movi_env_TN(member, c) \
     t_gen_mov_env_TN(member, tcg_constant_tl(c))
@@ -197,10 +197,10 @@ static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
         tcg_gen_andi_tl(cpu_PR[r], tn, 3);
     } else {
         if (r == PR_PID) {
-            gen_helper_tlb_flush_pid(cpu_env, tn);
+            gen_helper_tlb_flush_pid(tcg_env, tn);
         }
         if (dc->tb_flags & S_FLAG && r == PR_SPC) {
-            gen_helper_spc_write(cpu_env, tn);
+            gen_helper_spc_write(tcg_env, tn);
         } else if (r == PR_CCS) {
             dc->cpustate_changed = 1;
         }
@@ -265,7 +265,7 @@ static void cris_lock_irq(DisasContext *dc)
 static inline void t_gen_raise_exception(uint32_t index)
 {
-    gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
+    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
 }

 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
@@ -504,17 +504,17 @@ static void cris_evaluate_flags(DisasContext *dc)
     switch (dc->cc_op) {
     case CC_OP_MCP:
-        gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], cpu_env,
+        gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], tcg_env,
                                       cpu_PR[PR_CCS], cc_src,
                                       cc_dest, cc_result);
         break;
     case CC_OP_MULS:
-        gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], cpu_env,
+        gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], tcg_env,
                                        cpu_PR[PR_CCS], cc_result,
                                        cpu_PR[PR_MOF]);
         break;
     case CC_OP_MULU:
-        gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], cpu_env,
+        gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], tcg_env,
                                        cpu_PR[PR_CCS], cc_result,
                                        cpu_PR[PR_MOF]);
         break;
@@ -528,14 +528,14 @@ static void cris_evaluate_flags(DisasContext *dc)
         switch (dc->cc_size) {
         case 4:
             gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
-                                             cpu_env, cpu_PR[PR_CCS], cc_result);
+                                             tcg_env, cpu_PR[PR_CCS], cc_result);
             break;
         case 2:
             gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
-                                             cpu_env, cpu_PR[PR_CCS], cc_result);
+                                             tcg_env, cpu_PR[PR_CCS], cc_result);
             break;
         default:
-            gen_helper_evaluate_flags(cpu_env);
+            gen_helper_evaluate_flags(tcg_env);
             break;
         }
         break;
@@ -545,21 +545,21 @@ static void cris_evaluate_flags(DisasContext *dc)
     case CC_OP_SUB:
     case CC_OP_CMP:
         if (dc->cc_size == 4) {
-            gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], cpu_env,
+            gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], tcg_env,
                                             cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
         } else {
-            gen_helper_evaluate_flags(cpu_env);
+            gen_helper_evaluate_flags(tcg_env);
         }
         break;
     default:
         switch (dc->cc_size) {
         case 4:
-            gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], cpu_env,
+            gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], tcg_env,
                                             cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
             break;
         default:
-            gen_helper_evaluate_flags(cpu_env);
+            gen_helper_evaluate_flags(tcg_env);
             break;
         }
         break;
@@ -1330,7 +1330,7 @@ static int dec_btstq(CPUCRISState *env, DisasContext *dc)
     cris_cc_mask(dc, CC_MASK_NZ);
     c = tcg_constant_tl(dc->op1);
     cris_evaluate_flags(dc);
-    gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+    gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2],
                     c, cpu_PR[PR_CCS]);
     cris_alu(dc, CC_OP_MOVE,
              cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
@@ -1744,7 +1744,7 @@ static int dec_btst_r(CPUCRISState *env, DisasContext *dc)
             dc->op1, dc->op2);
     cris_cc_mask(dc, CC_MASK_NZ);
     cris_evaluate_flags(dc);
-    gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+    gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2],
                     cpu_R[dc->op1], cpu_PR[PR_CCS]);
     cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
              cpu_R[dc->op2], cpu_R[dc->op2], 4);
@@ -1946,7 +1946,7 @@ static int dec_move_rs(CPUCRISState *env, DisasContext *dc)
     c1 = tcg_constant_tl(dc->op1);
     c2 = tcg_constant_tl(dc->op2);
     cris_cc_mask(dc, 0);
-    gen_helper_movl_sreg_reg(cpu_env, c2, c1);
+    gen_helper_movl_sreg_reg(tcg_env, c2, c1);
     return 2;
 }
 static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
@@ -1956,7 +1956,7 @@ static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
     c1 = tcg_constant_tl(dc->op1);
     c2 = tcg_constant_tl(dc->op2);
     cris_cc_mask(dc, 0);
-    gen_helper_movl_reg_sreg(cpu_env, c1, c2);
+    gen_helper_movl_reg_sreg(tcg_env, c1, c2);
     return 2;
 }
@@ -2693,7 +2693,7 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
     cris_cc_mask(dc, 0);

     if (dc->op2 == 15) {
-        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                        -offsetof(CRISCPU, env) + offsetof(CPUState, halted));
         tcg_gen_movi_tl(env_pc, dc->pc + 2);
         t_gen_raise_exception(EXCP_HLT);
@@ -2706,7 +2706,7 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
         /* rfe. */
         LOG_DIS("rfe\n");
         cris_evaluate_flags(dc);
-        gen_helper_rfe(cpu_env);
+        gen_helper_rfe(tcg_env);
         dc->base.is_jmp = DISAS_UPDATE;
         dc->cpustate_changed = true;
         break;
@@ -2714,7 +2714,7 @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
         /* rfn. */
         LOG_DIS("rfn\n");
         cris_evaluate_flags(dc);
-        gen_helper_rfn(cpu_env);
+        gen_helper_rfn(tcg_env);
         dc->base.is_jmp = DISAS_UPDATE;
         dc->cpustate_changed = true;
         break;
@@ -2948,7 +2948,7 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
 static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUCRISState *env = cs->env_ptr;
+    CPUCRISState *env = cpu_env(cs);
     uint32_t tb_flags = dc->base.tb->flags;
     uint32_t pc_start;
@@ -3006,7 +3006,7 @@ static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
-    CPUCRISState *env = cs->env_ptr;
+    CPUCRISState *env = cpu_env(cs);
     unsigned int insn_len;

     /* Pretty disas. */
@@ -3238,41 +3238,41 @@ void cris_initialize_tcg(void)
 {
     int i;

-    cc_x = tcg_global_mem_new(cpu_env,
+    cc_x = tcg_global_mem_new(tcg_env,
                               offsetof(CPUCRISState, cc_x), "cc_x");
-    cc_src = tcg_global_mem_new(cpu_env,
+    cc_src = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUCRISState, cc_src), "cc_src");
-    cc_dest = tcg_global_mem_new(cpu_env,
+    cc_dest = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUCRISState, cc_dest),
                                  "cc_dest");
-    cc_result = tcg_global_mem_new(cpu_env,
+    cc_result = tcg_global_mem_new(tcg_env,
                                    offsetof(CPUCRISState, cc_result),
                                    "cc_result");
-    cc_op = tcg_global_mem_new(cpu_env,
+    cc_op = tcg_global_mem_new(tcg_env,
                                offsetof(CPUCRISState, cc_op), "cc_op");
-    cc_size = tcg_global_mem_new(cpu_env,
+    cc_size = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUCRISState, cc_size),
                                  "cc_size");
-    cc_mask = tcg_global_mem_new(cpu_env,
+    cc_mask = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUCRISState, cc_mask),
                                  "cc_mask");
-    env_pc = tcg_global_mem_new(cpu_env,
+    env_pc = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUCRISState, pc),
                                 "pc");
-    env_btarget = tcg_global_mem_new(cpu_env,
+    env_btarget = tcg_global_mem_new(tcg_env,
                                      offsetof(CPUCRISState, btarget),
                                      "btarget");
-    env_btaken = tcg_global_mem_new(cpu_env,
+    env_btaken = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUCRISState, btaken),
                                     "btaken");
     for (i = 0; i < 16; i++) {
-        cpu_R[i] = tcg_global_mem_new(cpu_env,
+        cpu_R[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUCRISState, regs[i]),
                                       regnames_v32[i]);
     }
     for (i = 0; i < 16; i++) {
-        cpu_PR[i] = tcg_global_mem_new(cpu_env,
+        cpu_PR[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPUCRISState, pregs[i]),
                                        pregnames_v32[i]);
     }

target/cris/translate_v10.c.inc

@@ -282,7 +282,7 @@ static unsigned int dec10_quick_imm(DisasContext *dc)
     } else {
         /* BTST */
         cris_update_cc_op(dc, CC_OP_FLAGS, 4);
-        gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+        gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst],
                         c, cpu_PR[PR_CCS]);
     }
     break;
@@ -696,7 +696,7 @@ static unsigned int dec10_reg(DisasContext *dc)
             LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
             cris_cc_mask(dc, CC_MASK_NZVC);
             cris_update_cc_op(dc, CC_OP_FLAGS, 4);
-            gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+            gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst],
                             cpu_R[dc->src], cpu_PR[PR_CCS]);
             break;
         case CRISV10_REG_DSTEP:
@@ -1235,41 +1235,41 @@ void cris_initialize_crisv10_tcg(void)
 {
     int i;

-    cc_x = tcg_global_mem_new(cpu_env,
+    cc_x = tcg_global_mem_new(tcg_env,
                               offsetof(CPUCRISState, cc_x), "cc_x");
-    cc_src = tcg_global_mem_new(cpu_env,
+    cc_src = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUCRISState, cc_src), "cc_src");
-    cc_dest = tcg_global_mem_new(cpu_env,
+    cc_dest = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUCRISState, cc_dest),
                                  "cc_dest");
-    cc_result = tcg_global_mem_new(cpu_env,
+    cc_result = tcg_global_mem_new(tcg_env,
                                    offsetof(CPUCRISState, cc_result),
                                    "cc_result");
-    cc_op = tcg_global_mem_new(cpu_env,
+    cc_op = tcg_global_mem_new(tcg_env,
                                offsetof(CPUCRISState, cc_op), "cc_op");
-    cc_size = tcg_global_mem_new(cpu_env,
+    cc_size = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUCRISState, cc_size),
                                  "cc_size");
-    cc_mask = tcg_global_mem_new(cpu_env,
+    cc_mask = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUCRISState, cc_mask),
                                  "cc_mask");
-    env_pc = tcg_global_mem_new(cpu_env,
+    env_pc = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUCRISState, pc),
                                 "pc");
-    env_btarget = tcg_global_mem_new(cpu_env,
+    env_btarget = tcg_global_mem_new(tcg_env,
                                      offsetof(CPUCRISState, btarget),
                                      "btarget");
-    env_btaken = tcg_global_mem_new(cpu_env,
+    env_btaken = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUCRISState, btaken),
                                     "btaken");
     for (i = 0; i < 16; i++) {
-        cpu_R[i] = tcg_global_mem_new(cpu_env,
+        cpu_R[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUCRISState, regs[i]),
                                       regnames_v10[i]);
     }
     for (i = 0; i < 16; i++) {
-        cpu_PR[i] = tcg_global_mem_new(cpu_env,
+        cpu_PR[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPUCRISState, pregs[i]),
                                        pregnames_v10[i]);
     }

Some files were not shown because too many files have changed in this diff.