mirror of https://github.com/xemu-project/xemu.git
HW core patch queue
. Deprecate unmaintained SH-4 models (Samuel)
. HPET: Convert DPRINTF calls to trace events (Daniel)
. Implement buffered block writes in Intel PFlash (Gerd)
. Ignore ELF loadable segments with zero size (Bin)
. ESP/NCR53C9x: PCI DMA fixes (Mark)
. PIIX: Simplify Xen PCI IRQ routing (Bernhard)
. Restrict CPU 'start-powered-off' property to sysemu (Phil)
. target/alpha: Only build sys_helper.c on system emulation (Phil)
. target/xtensa: Use generic instruction breakpoint API & add test (Max)
. Restrict icount to system emulation (Phil)
. Do not set CPUState TCG-specific flags in non-TCG accels (Phil)
. Cleanup TCG tb_invalidate API (Phil)
. Correct LoongArch/KVM include path (Bibo)
. Do not ignore throttle errors in crypto backends (Phil)
. MAINTAINERS updates (Raphael, Zhao)

# gpg: Signature made Fri 19 Jan 2024 11:32:09 GMT
# gpg:                using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD 6BB2 E3E3 2C2C DEAD C0DE

Merge tag 'hw-cpus-20240119' of https://github.com/philmd/qemu into staging

* tag 'hw-cpus-20240119' of https://github.com/philmd/qemu: (36 commits)
  configure: Add linux header compile support for LoongArch
  MAINTAINERS: Update hw/core/cpu.c entry
  MAINTAINERS: Update Raphael Norwitz email
  hw/elf_ops: Ignore loadable segments with zero size
  hw/scsi/esp-pci: set DMA_STAT_BCMBLT when BLAST command issued
  hw/scsi/esp-pci: synchronise setting of DMA_STAT_DONE with ESP completion interrupt
  hw/scsi/esp-pci: generate PCI interrupt from separate ESP and PCI sources
  hw/scsi/esp-pci: use correct address register for PCI DMA transfers
  target/riscv: Rename tcg_cpu_FOO() to include 'riscv'
  target/i386: Rename tcg_cpu_FOO() to include 'x86'
  hw/s390x: Rename cpu_class_init() to include 'sclp'
  hw/core/cpu: Rename cpu_class_init() to include 'common'
  accel: Rename accel_init_ops_interfaces() to include 'system'
  cpus: Restrict 'start-powered-off' property to system emulation
  system/watchpoint: Move TCG specific code to accel/tcg/
  system/replay: Restrict icount to system emulation
  hw/pflash: implement update buffer for block writes
  hw/pflash: use ldn_{be,le}_p and stn_{be,le}_p
  hw/pflash: refactor pflash_data_write()
  hw/i386/pc_piix: Make piix_intx_routing_notifier_xen() more device independent
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 3f2a357b95
@@ -1867,7 +1867,8 @@ M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
 R: Philippe Mathieu-Daudé <philmd@linaro.org>
 R: Yanan Wang <wangyanan55@huawei.com>
 S: Supported
-F: hw/core/cpu.c
+F: hw/core/cpu-common.c
+F: hw/core/cpu-sysemu.c
 F: hw/core/machine-qmp-cmds.c
 F: hw/core/machine.c
 F: hw/core/machine-smp.c
@@ -2555,7 +2556,7 @@ F: include/hw/virtio/virtio-gpu.h
 F: docs/system/devices/virtio-gpu.rst
 
 vhost-user-blk
-M: Raphael Norwitz <raphael.norwitz@nutanix.com>
+M: Raphael Norwitz <raphael.s.norwitz@gmail.com>
 S: Maintained
 F: contrib/vhost-user-blk/
 F: contrib/vhost-user-scsi/
@@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms)
 }
 
 /* initialize the arch-independent accel operation interfaces */
-void accel_init_ops_interfaces(AccelClass *ac)
+void accel_system_init_ops_interfaces(AccelClass *ac)
 {
     const char *ac_name;
     char *ops_name;
@@ -10,6 +10,6 @@
 #ifndef ACCEL_SYSTEM_H
 #define ACCEL_SYSTEM_H
 
-void accel_init_ops_interfaces(AccelClass *ac);
+void accel_system_init_ops_interfaces(AccelClass *ac);
 
 #endif /* ACCEL_SYSTEM_H */
@@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
 void accel_init_interfaces(AccelClass *ac)
 {
 #ifndef CONFIG_USER_ONLY
-    accel_init_ops_interfaces(ac);
+    accel_system_init_ops_interfaces(ac);
 #endif /* !CONFIG_USER_ONLY */
 
     accel_init_cpu_interfaces(ac);
@@ -27,7 +27,6 @@ static void *dummy_cpu_thread_fn(void *arg)
     bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->neg.can_do_io = true;
     current_cpu = cpu;
 
 #ifndef _WIN32
@@ -428,7 +428,6 @@ static void *hvf_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
-    cpu->neg.can_do_io = true;
     current_cpu = cpu;
 
     hvf_init_vcpu(cpu);
@@ -36,7 +36,6 @@ static void *kvm_vcpu_thread_fn(void *arg)
     bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->neg.can_do_io = true;
    current_cpu = cpu;
 
     r = kvm_init_vcpu(cpu, &error_fatal);
@@ -49,21 +49,19 @@ static bool icount_sleep = true;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
 #define MAX_ICOUNT_SHIFT 10
 
-/*
- * 0 = Do not count executed instructions.
- * 1 = Fixed conversion of insn to ns via "shift" option
- * 2 = Runtime adaptive algorithm to compute shift
- */
-int use_icount;
+/* Do not count executed instructions */
+ICountMode use_icount = ICOUNT_DISABLED;
 
 static void icount_enable_precise(void)
 {
-    use_icount = 1;
+    /* Fixed conversion of insn to ns via "shift" option */
+    use_icount = ICOUNT_PRECISE;
 }
 
 static void icount_enable_adaptive(void)
 {
-    use_icount = 2;
+    /* Runtime adaptive algorithm to compute shift */
+    use_icount = ICOUNT_ADAPTATIVE;
 }
 
 /*
@@ -256,7 +254,7 @@ static void icount_warp_rt(void)
         int64_t warp_delta;
 
         warp_delta = clock - timers_state.vm_clock_warp_start;
-        if (icount_enabled() == 2) {
+        if (icount_enabled() == ICOUNT_ADAPTATIVE) {
             /*
              * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
              * ahead of real time (it might already be ahead so careful not
@@ -419,7 +417,7 @@ void icount_account_warp_timer(void)
     icount_warp_rt();
 }
 
-void icount_configure(QemuOpts *opts, Error **errp)
+bool icount_configure(QemuOpts *opts, Error **errp)
 {
     const char *option = qemu_opt_get(opts, "shift");
     bool sleep = qemu_opt_get_bool(opts, "sleep", true);
@@ -429,27 +427,28 @@ void icount_configure(QemuOpts *opts, Error **errp)
     if (!option) {
         if (qemu_opt_get(opts, "align") != NULL) {
             error_setg(errp, "Please specify shift option when using align");
+            return false;
         }
-        return;
+        return true;
     }
 
     if (align && !sleep) {
         error_setg(errp, "align=on and sleep=off are incompatible");
-        return;
+        return false;
     }
 
     if (strcmp(option, "auto") != 0) {
         if (qemu_strtol(option, NULL, 0, &time_shift) < 0
             || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
             error_setg(errp, "icount: Invalid shift value");
-            return;
+            return false;
         }
     } else if (icount_align_option) {
         error_setg(errp, "shift=auto and align=on are incompatible");
-        return;
+        return false;
     } else if (!icount_sleep) {
         error_setg(errp, "shift=auto and sleep=off are incompatible");
-        return;
+        return false;
     }
 
     icount_sleep = sleep;
@@ -463,7 +462,7 @@ void icount_configure(QemuOpts *opts, Error **errp)
     if (time_shift >= 0) {
         timers_state.icount_time_shift = time_shift;
         icount_enable_precise();
-        return;
+        return true;
     }
 
     icount_enable_adaptive();
@@ -491,11 +490,14 @@ void icount_configure(QemuOpts *opts, Error **errp)
     timer_mod(timers_state.icount_vm_timer,
               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
               NANOSECONDS_PER_SECOND / 10);
+    return true;
 }
 
 void icount_notify_exit(void)
 {
-    if (icount_enabled() && current_cpu) {
+    assert(icount_enabled());
+
+    if (current_cpu) {
         qemu_cpu_kick(current_cpu);
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
     }
@@ -24,6 +24,7 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
 
 specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
   'cputlb.c',
+  'watchpoint.c',
 ))
 
 system_ss.add(when: ['CONFIG_TCG'], if_true: files(
@@ -1021,7 +1021,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
  * Called with mmap_lock held for user-mode emulation
  * NOTE: this function must not be called while a TB is running.
  */
-void tb_invalidate_phys_page(tb_page_addr_t addr)
+static void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
     tb_page_addr_t start, last;
 
@@ -1160,28 +1160,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #endif
 }
 
-/*
- * Invalidate all TBs which intersect with the target physical
- * address page @addr.
- */
-void tb_invalidate_phys_page(tb_page_addr_t addr)
-{
-    struct page_collection *pages;
-    tb_page_addr_t start, last;
-    PageDesc *p;
-
-    p = page_find(addr >> TARGET_PAGE_BITS);
-    if (p == NULL) {
-        return;
-    }
-
-    start = addr & TARGET_PAGE_MASK;
-    last = addr | ~TARGET_PAGE_MASK;
-    pages = page_collection_lock(start, last);
-    tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
-    page_collection_unlock(pages);
-}
-
 /*
  * Invalidate all TBs which intersect with the target physical address range
  * [start;last]. NOTE: start and end may refer to *different* physical pages.
@@ -0,0 +1,143 @@
+/*
+ * CPU watchpoints
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
+#include "exec/exec-all.h"
+#include "exec/translate-all.h"
+#include "sysemu/tcg.h"
+#include "sysemu/replay.h"
+#include "hw/core/tcg-cpu-ops.h"
+#include "hw/core/cpu.h"
+
+/*
+ * Return true if this watchpoint address matches the specified
+ * access (ie the address range covered by the watchpoint overlaps
+ * partially or completely with the address range covered by the
+ * access).
+ */
+static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
+                                              vaddr addr, vaddr len)
+{
+    /*
+     * We know the lengths are non-zero, but a little caution is
+     * required to avoid errors in the case where the range ends
+     * exactly at the top of the address space and so addr + len
+     * wraps round to zero.
+     */
+    vaddr wpend = wp->vaddr + wp->len - 1;
+    vaddr addrend = addr + len - 1;
+
+    return !(addr > wpend || wp->vaddr > addrend);
+}
+
+/* Return flags for watchpoints that match addr + prot. */
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
+{
+    CPUWatchpoint *wp;
+    int ret = 0;
+
+    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
+        if (watchpoint_address_matches(wp, addr, len)) {
+            ret |= wp->flags;
+        }
+    }
+    return ret;
+}
+
+/* Generate a debug exception if a watchpoint has been hit. */
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+                          MemTxAttrs attrs, int flags, uintptr_t ra)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+    CPUWatchpoint *wp;
+
+    assert(tcg_enabled());
+    if (cpu->watchpoint_hit) {
+        /*
+         * We re-entered the check after replacing the TB.
+         * Now raise the debug interrupt so that it will
+         * trigger after the current instruction.
+         */
+        bql_lock();
+        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
+        bql_unlock();
+        return;
+    }
+
+    if (cc->tcg_ops->adjust_watchpoint_address) {
+        /* this is currently used only by ARM BE32 */
+        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
+    }
+
+    assert((flags & ~BP_MEM_ACCESS) == 0);
+    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
+        int hit_flags = wp->flags & flags;
+
+        if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
+            if (replay_running_debug()) {
+                /*
+                 * replay_breakpoint reads icount.
+                 * Force recompile to succeed, because icount may
+                 * be read only at the end of the block.
+                 */
+                if (!cpu->neg.can_do_io) {
+                    /* Force execution of one insn next time. */
+                    cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
+                    cpu_loop_exit_restore(cpu, ra);
+                }
+                /*
+                 * Don't process the watchpoints when we are
+                 * in a reverse debugging operation.
+                 */
+                replay_breakpoint();
+                return;
+            }
+
+            wp->flags |= hit_flags << BP_HIT_SHIFT;
+            wp->hitaddr = MAX(addr, wp->vaddr);
+            wp->hitattrs = attrs;
+
+            if (wp->flags & BP_CPU
+                && cc->tcg_ops->debug_check_watchpoint
+                && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
+                wp->flags &= ~BP_WATCHPOINT_HIT;
+                continue;
+            }
+            cpu->watchpoint_hit = wp;
+
+            mmap_lock();
+            /* This call also restores vCPU state */
+            tb_check_watchpoint(cpu, ra);
+            if (wp->flags & BP_STOP_BEFORE_ACCESS) {
+                cpu->exception_index = EXCP_DEBUG;
+                mmap_unlock();
+                cpu_loop_exit(cpu);
+            } else {
+                /* Force execution of one insn next time. */
+                cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
+                mmap_unlock();
+                cpu_loop_exit_noexc(cpu);
+            }
+        } else {
+            wp->flags &= ~BP_WATCHPOINT_HIT;
+        }
+    }
+}
@@ -398,6 +398,7 @@ static void cryptodev_backend_set_ops(Object *obj, Visitor *v,
 static void
 cryptodev_backend_complete(UserCreatable *uc, Error **errp)
 {
+    ERRP_GUARD();
     CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
     CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
     uint32_t services;
@@ -406,11 +407,20 @@ cryptodev_backend_complete(UserCreatable *uc, Error **errp)
     QTAILQ_INIT(&backend->opinfos);
     value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
     cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
+    if (*errp) {
+        return;
+    }
     value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
     cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
+    if (*errp) {
+        return;
+    }
 
     if (bc->init) {
         bc->init(backend, errp);
+        if (*errp) {
+            return;
+        }
     }
 
     services = backend->conf.crypto_services;
@@ -445,6 +445,7 @@ case "$cpu" in
   loongarch*)
     cpu=loongarch64
     host_arch=loongarch64
+    linux_arch=loongarch
     ;;
 
   mips64*)
cpu-target.c
@@ -204,6 +204,7 @@ static Property cpu_common_props[] = {
     DEFINE_PROP_END_OF_LIST(),
 };
 
+#ifndef CONFIG_USER_ONLY
 static bool cpu_get_start_powered_off(Object *obj, Error **errp)
 {
     CPUState *cpu = CPU(obj);
@@ -215,12 +216,13 @@ static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
     CPUState *cpu = CPU(obj);
     cpu->start_powered_off = value;
 }
+#endif
 
 void cpu_class_init_props(DeviceClass *dc)
 {
+#ifndef CONFIG_USER_ONLY
     ObjectClass *oc = OBJECT_CLASS(dc);
 
-    device_class_set_props(dc, cpu_common_props);
     /*
      * We can't use DEFINE_PROP_BOOL in the Property array for this
      * property, because we want this to be settable after realize.
@@ -228,6 +230,9 @@ void cpu_class_init_props(DeviceClass *dc)
     object_class_property_add_bool(oc, "start-powered-off",
                                    cpu_get_start_powered_off,
                                    cpu_set_start_powered_off);
+#endif
+
+    device_class_set_props(dc, cpu_common_props);
 }
 
 void cpu_exec_initfn(CPUState *cpu)
@@ -314,35 +319,6 @@ void list_cpus(void)
     cpu_list();
 }
 
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(hwaddr addr)
-{
-    mmap_lock();
-    tb_invalidate_phys_page(addr);
-    mmap_unlock();
-}
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
-{
-    ram_addr_t ram_addr;
-    MemoryRegion *mr;
-    hwaddr l = 1;
-
-    if (!tcg_enabled()) {
-        return;
-    }
-
-    RCU_READ_LOCK_GUARD();
-    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
-    if (!(memory_region_is_ram(mr)
-          || memory_region_is_romd(mr))) {
-        return;
-    }
-    ram_addr = memory_region_get_ram_addr(mr) + addr;
-    tb_invalidate_phys_page(ram_addr);
-}
-#endif
-
 /* enable or disable single step mode. EXCP_DEBUG is returned by the
    CPU loop after each instruction */
 void cpu_single_step(CPUState *cpu, int enabled)
@@ -269,6 +269,11 @@ Nios II ``10m50-ghrd`` and ``nios2-generic-nommu`` machines (since 8.2)
 
 The Nios II architecture is orphan.
 
+``shix`` (since 9.0)
+''''''''''''''''''''
+
+The machine is no longer in existence and has been long unmaintained
+in QEMU. This also holds for the TC51828 16MiB flash that it uses.
+
 Backend options
 ---------------
 
@@ -80,16 +80,39 @@ struct PFlashCFI01 {
     uint16_t ident3;
     uint8_t cfi_table[0x52];
     uint64_t counter;
-    unsigned int writeblock_size;
+    uint32_t writeblock_size;
     MemoryRegion mem;
     char *name;
     void *storage;
     VMChangeStateEntry *vmstate;
     bool old_multiple_chip_handling;
+
+    /* block update buffer */
+    unsigned char *blk_bytes;
+    uint32_t blk_offset;
 };
 
 static int pflash_post_load(void *opaque, int version_id);
 
+static bool pflash_blk_write_state_needed(void *opaque)
+{
+    PFlashCFI01 *pfl = opaque;
+
+    return (pfl->blk_offset != -1);
+}
+
+static const VMStateDescription vmstate_pflash_blk_write = {
+    .name = "pflash_cfi01_blk_write",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = pflash_blk_write_state_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_VBUFFER_UINT32(blk_bytes, PFlashCFI01, 0, NULL, writeblock_size),
+        VMSTATE_UINT32(blk_offset, PFlashCFI01),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_pflash = {
     .name = "pflash_cfi01",
     .version_id = 1,
@@ -101,6 +124,10 @@ static const VMStateDescription vmstate_pflash = {
         VMSTATE_UINT8(status, PFlashCFI01),
         VMSTATE_UINT64(counter, PFlashCFI01),
         VMSTATE_END_OF_LIST()
     },
+    .subsections = (const VMStateDescription * const []) {
+        &vmstate_pflash_blk_write,
+        NULL
+    }
 };
 
@@ -225,34 +252,10 @@ static uint32_t pflash_data_read(PFlashCFI01 *pfl, hwaddr offset,
     uint32_t ret;
 
     p = pfl->storage;
-    switch (width) {
-    case 1:
-        ret = p[offset];
-        break;
-    case 2:
-        if (be) {
-            ret = p[offset] << 8;
-            ret |= p[offset + 1];
-        } else {
-            ret = p[offset];
-            ret |= p[offset + 1] << 8;
-        }
-        break;
-    case 4:
-        if (be) {
-            ret = p[offset] << 24;
-            ret |= p[offset + 1] << 16;
-            ret |= p[offset + 2] << 8;
-            ret |= p[offset + 3];
-        } else {
-            ret = p[offset];
-            ret |= p[offset + 1] << 8;
-            ret |= p[offset + 2] << 16;
-            ret |= p[offset + 3] << 24;
-        }
-        break;
-    default:
-        abort();
+    if (be) {
+        ret = ldn_be_p(p + offset, width);
+    } else {
+        ret = ldn_le_p(p + offset, width);
     }
     trace_pflash_data_read(pfl->name, offset, width, ret);
     return ret;
@@ -400,40 +403,61 @@ static void pflash_update(PFlashCFI01 *pfl, int offset,
     }
 }
 
+/* copy current flash content to block update buffer */
+static void pflash_blk_write_start(PFlashCFI01 *pfl, hwaddr offset)
+{
+    hwaddr mask = ~(pfl->writeblock_size - 1);
+
+    trace_pflash_write_block_start(pfl->name, pfl->counter);
+    pfl->blk_offset = offset & mask;
+    memcpy(pfl->blk_bytes, pfl->storage + pfl->blk_offset,
+           pfl->writeblock_size);
+}
+
+/* commit block update buffer changes */
+static void pflash_blk_write_flush(PFlashCFI01 *pfl)
+{
+    g_assert(pfl->blk_offset != -1);
+    trace_pflash_write_block_flush(pfl->name);
+    memcpy(pfl->storage + pfl->blk_offset, pfl->blk_bytes,
+           pfl->writeblock_size);
+    pflash_update(pfl, pfl->blk_offset, pfl->writeblock_size);
+    pfl->blk_offset = -1;
+}
+
+/* discard block update buffer changes */
+static void pflash_blk_write_abort(PFlashCFI01 *pfl)
+{
+    trace_pflash_write_block_abort(pfl->name);
+    pfl->blk_offset = -1;
+}
+
 static inline void pflash_data_write(PFlashCFI01 *pfl, hwaddr offset,
                                      uint32_t value, int width, int be)
 {
-    uint8_t *p = pfl->storage;
+    uint8_t *p;
 
-    trace_pflash_data_write(pfl->name, offset, width, value, pfl->counter);
-    switch (width) {
-    case 1:
-        p[offset] = value;
-        break;
-    case 2:
-        if (be) {
-            p[offset] = value >> 8;
-            p[offset + 1] = value;
-        } else {
-            p[offset] = value;
-            p[offset + 1] = value >> 8;
-        }
-        break;
-    case 4:
-        if (be) {
-            p[offset] = value >> 24;
-            p[offset + 1] = value >> 16;
-            p[offset + 2] = value >> 8;
-            p[offset + 3] = value;
-        } else {
-            p[offset] = value;
-            p[offset + 1] = value >> 8;
-            p[offset + 2] = value >> 16;
-            p[offset + 3] = value >> 24;
-        }
-        break;
+    if (pfl->blk_offset != -1) {
+        /* block write: redirect writes to block update buffer */
+        if ((offset < pfl->blk_offset) ||
+            (offset + width > pfl->blk_offset + pfl->writeblock_size)) {
+            pfl->status |= 0x10; /* Programming error */
+            return;
+        }
+        trace_pflash_data_write_block(pfl->name, offset, width, value,
+                                      pfl->counter);
+        p = pfl->blk_bytes + (offset - pfl->blk_offset);
+    } else {
+        /* write directly to storage */
+        trace_pflash_data_write(pfl->name, offset, width, value);
+        p = pfl->storage + offset;
+    }
+
+    if (be) {
+        stn_be_p(p, width, value);
+    } else {
+        stn_le_p(p, width, value);
     }
 }
 
 static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
@@ -548,9 +572,9 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
             } else {
                 value = extract32(value, 0, pfl->bank_width * 8);
             }
-            trace_pflash_write_block(pfl->name, value);
             pfl->counter = value;
             pfl->wcycle++;
+            pflash_blk_write_start(pfl, offset);
             break;
         case 0x60:
             if (cmd == 0xd0) {
@@ -581,12 +605,7 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
         switch (pfl->cmd) {
         case 0xe8: /* Block write */
             /* FIXME check @offset, @width */
-            if (!pfl->ro) {
-                /*
-                 * FIXME writing straight to memory is *wrong*.  We
-                 * should write to a buffer, and flush it to memory
-                 * only on confirm command (see below).
-                 */
+            if (!pfl->ro && (pfl->blk_offset != -1)) {
                 pflash_data_write(pfl, offset, value, width, be);
             } else {
                 pfl->status |= 0x10; /* Programming error */
@@ -595,18 +614,8 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
             pfl->status |= 0x80;
 
             if (!pfl->counter) {
-                hwaddr mask = pfl->writeblock_size - 1;
-                mask = ~mask;
-
                 trace_pflash_write(pfl->name, "block write finished");
                 pfl->wcycle++;
-                if (!pfl->ro) {
-                    /* Flush the entire write buffer onto backing storage. */
-                    /* FIXME premature! */
-                    pflash_update(pfl, offset & mask, pfl->writeblock_size);
-                } else {
-                    pfl->status |= 0x10; /* Programming error */
-                }
             }
 
             pfl->counter--;
@@ -618,20 +627,17 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
     case 3: /* Confirm mode */
         switch (pfl->cmd) {
         case 0xe8: /* Block write */
-            if (cmd == 0xd0) {
-                /* FIXME this is where we should write out the buffer */
+            if ((cmd == 0xd0) && !(pfl->status & 0x10)) {
+                pflash_blk_write_flush(pfl);
                 pfl->wcycle = 0;
                 pfl->status |= 0x80;
             } else {
-                qemu_log_mask(LOG_UNIMP,
-                              "%s: Aborting write to buffer not implemented,"
-                              " the data is already written to storage!\n"
-                              "Flash device reset into READ mode.\n",
-                              __func__);
+                pflash_blk_write_abort(pfl);
                 goto mode_read_array;
             }
             break;
         default:
+            pflash_blk_write_abort(pfl);
             goto error_flash;
         }
         break;
@@ -865,6 +871,9 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
     pfl->cmd = 0x00;
     pfl->status = 0x80; /* WSM ready */
     pflash_cfi01_fill_cfi_table(pfl);
+
+    pfl->blk_bytes = g_malloc(pfl->writeblock_size);
+    pfl->blk_offset = -1;
 }
 
 static void pflash_cfi01_system_reset(DeviceState *dev)
@@ -884,6 +893,8 @@ static void pflash_cfi01_system_reset(DeviceState *dev)
      * This model deliberately ignores this delay.
      */
     pfl->status = 0x80;
+
+    pfl->blk_offset = -1;
 }
 
 static Property pflash_cfi01_properties[] = {
@@ -546,7 +546,7 @@ static void pflash_write(void *opaque, hwaddr offset, uint64_t value,
         }
         goto reset_flash;
     }
-    trace_pflash_data_write(pfl->name, offset, width, value, 0);
+    trace_pflash_data_write(pfl->name, offset, width, value);
     if (!pfl->ro) {
         p = (uint8_t *)pfl->storage + offset;
         if (pfl->be) {
@@ -202,6 +202,7 @@ static sh7750_io_device tc58128 = {
 
 int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2)
 {
+    warn_report_once("The TC58128 flash device is deprecated");
     init_dev(&tc58128_devs[0], zone1);
     init_dev(&tc58128_devs[1], zone2);
     return sh7750_register_io_device(s, &tc58128);
@@ -12,7 +12,8 @@ fdctrl_tc_pulse(int level) "TC pulse: %u"
 pflash_chip_erase_invalid(const char *name, uint64_t offset) "%s: chip erase: invalid address 0x%" PRIx64
 pflash_chip_erase_start(const char *name) "%s: start chip erase"
 pflash_data_read(const char *name, uint64_t offset, unsigned size, uint32_t value) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x"
-pflash_data_write(const char *name, uint64_t offset, unsigned size, uint32_t value, uint64_t counter) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x counter:0x%016"PRIx64
+pflash_data_write(const char *name, uint64_t offset, unsigned size, uint32_t value) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x"
+pflash_data_write_block(const char *name, uint64_t offset, unsigned size, uint32_t value, uint64_t counter) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x counter:0x%016"PRIx64
 pflash_device_id(const char *name, uint16_t id) "%s: read device ID: 0x%04x"
 pflash_device_info(const char *name, uint64_t offset) "%s: read device information offset:0x%04" PRIx64
 pflash_erase_complete(const char *name) "%s: sector erase complete"
@@ -32,7 +33,9 @@ pflash_unlock0_failed(const char *name, uint64_t offset, uint8_t cmd, uint16_t a
 pflash_unlock1_failed(const char *name, uint64_t offset, uint8_t cmd) "%s: unlock0 failed 0x%" PRIx64 " 0x%02x"
 pflash_unsupported_device_configuration(const char *name, uint8_t width, uint8_t max) "%s: unsupported device configuration: device_width:%d max_device_width:%d"
 pflash_write(const char *name, const char *str) "%s: %s"
-pflash_write_block(const char *name, uint32_t value) "%s: block write: bytes:0x%x"
+pflash_write_block_start(const char *name, uint32_t value) "%s: block write start: bytes:0x%x"
+pflash_write_block_flush(const char *name) "%s: block write flush"
+pflash_write_block_abort(const char *name) "%s: block write abort"
 pflash_write_block_erase(const char *name, uint64_t offset, uint64_t len) "%s: block erase offset:0x%" PRIx64 " bytes:0x%" PRIx64
 pflash_write_failed(const char *name, uint64_t offset, uint8_t cmd) "%s: command failed 0x%" PRIx64 " 0x%02x"
 pflash_write_invalid(const char *name, uint8_t cmd) "%s: invalid write for command 0x%02x"
@@ -273,7 +273,7 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
     return cpu->cpu_index;
 }
 
-static void cpu_class_init(ObjectClass *klass, void *data)
+static void cpu_common_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     ResettableClass *rc = RESETTABLE_CLASS(klass);
@@ -304,7 +304,7 @@ static const TypeInfo cpu_type_info = {
     .instance_finalize = cpu_common_finalize,
     .abstract = true,
     .class_size = sizeof(CPUClass),
-    .class_init = cpu_class_init,
+    .class_init = cpu_common_class_init,
 };
 
 static void cpu_register_types(void)
@@ -92,13 +92,10 @@ static void piix_intx_routing_notifier_xen(PCIDevice *dev)
 {
     int i;
 
-    /* Scan for updates to PCI link routes (0x60-0x63). */
+    /* Scan for updates to PCI link routes. */
     for (i = 0; i < PIIX_NUM_PIRQS; i++) {
-        uint8_t v = dev->config_read(dev, PIIX_PIRQCA + i, 1);
-        if (v & 0x80) {
-            v = 0;
-        }
-        v &= 0xf;
+        const PCIINTxRoute route = pci_device_route_intx_to_irq(dev, i);
+        const uint8_t v = route.mode == PCI_INTX_ENABLED ? route.irq : 0;
         xen_set_pci_link_route(i, v);
     }
 }
@@ -73,7 +73,7 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
     return 1;
 }
 
-static void cpu_class_init(ObjectClass *oc, void *data)
+static void sclp_cpu_class_init(ObjectClass *oc, void *data)
 {
     SCLPEventClass *k = SCLP_EVENT_CLASS(oc);
     DeviceClass *dc = DEVICE_CLASS(oc);
@@ -94,7 +94,7 @@ static const TypeInfo sclp_cpu_info = {
     .name          = TYPE_SCLP_CPU_HOTPLUG,
     .parent        = TYPE_SCLP_EVENT,
     .instance_size = sizeof(SCLPEvent),
-    .class_init    = cpu_class_init,
+    .class_init    = sclp_cpu_class_init,
     .class_size    = sizeof(SCLPEventClass),
 };
 
@@ -77,6 +77,41 @@ struct PCIESPState {
     ESPState esp;
 };
 
+static void esp_pci_update_irq(PCIESPState *pci)
+{
+    int scsi_level = !!(pci->dma_regs[DMA_STAT] & DMA_STAT_SCSIINT);
+    int dma_level = (pci->dma_regs[DMA_CMD] & DMA_CMD_INTE_D) ?
+                    !!(pci->dma_regs[DMA_STAT] & DMA_STAT_DONE) : 0;
+    int level = scsi_level || dma_level;
+
+    pci_set_irq(PCI_DEVICE(pci), level);
+}
+
+static void esp_irq_handler(void *opaque, int irq_num, int level)
+{
+    PCIESPState *pci = PCI_ESP(opaque);
+
+    if (level) {
+        pci->dma_regs[DMA_STAT] |= DMA_STAT_SCSIINT;
+
+        /*
+         * If raising the ESP IRQ to indicate end of DMA transfer, set
+         * DMA_STAT_DONE at the same time. In theory this should be done in
+         * esp_pci_dma_memory_rw(), however there is a delay between setting
+         * DMA_STAT_DONE and the ESP IRQ arriving which is visible to the
+         * guest that can cause confusion e.g. Linux
+         */
+        if ((pci->dma_regs[DMA_CMD] & DMA_CMD_MASK) == 0x3 &&
+            pci->dma_regs[DMA_WBC] == 0) {
+            pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
+        }
+    } else {
+        pci->dma_regs[DMA_STAT] &= ~DMA_STAT_SCSIINT;
+    }
+
+    esp_pci_update_irq(pci);
+}
+
 static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
 {
     ESPState *s = &pci->esp;
@@ -89,6 +124,7 @@ static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
 {
     trace_esp_pci_dma_blast(val);
     qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
+    pci->dma_regs[DMA_STAT] |= DMA_STAT_BCMBLT;
 }
 
 static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
@@ -151,6 +187,7 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
             /* clear some bits on write */
             uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
             pci->dma_regs[DMA_STAT] &= ~(val & mask);
+            esp_pci_update_irq(pci);
         }
         break;
     default:
@@ -161,17 +198,14 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
 
 static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
 {
-    ESPState *s = &pci->esp;
     uint32_t val;
 
     val = pci->dma_regs[saddr];
     if (saddr == DMA_STAT) {
-        if (s->rregs[ESP_RSTAT] & STAT_INT) {
-            val |= DMA_STAT_SCSIINT;
-        }
         if (!(pci->sbac & SBAC_STATUS)) {
             pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
                                          DMA_STAT_DONE);
+            esp_pci_update_irq(pci);
         }
     }
 
@@ -275,7 +309,7 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
         qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
     }
 
-    addr = pci->dma_regs[DMA_SPA];
+    addr = pci->dma_regs[DMA_WAC];
     if (pci->dma_regs[DMA_WBC] < len) {
         len = pci->dma_regs[DMA_WBC];
     }
@@ -285,9 +319,6 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
     /* update status registers */
     pci->dma_regs[DMA_WBC] -= len;
     pci->dma_regs[DMA_WAC] += len;
-    if (pci->dma_regs[DMA_WBC] == 0) {
-        pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
-    }
 }
 
 static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
@@ -342,23 +373,13 @@ static const VMStateDescription vmstate_esp_pci_scsi = {
     }
 };
 
-static void esp_pci_command_complete(SCSIRequest *req, size_t resid)
-{
-    ESPState *s = req->hba_private;
-    PCIESPState *pci = container_of(s, PCIESPState, esp);
-
-    esp_command_complete(req, resid);
-    pci->dma_regs[DMA_WBC] = 0;
-    pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
-}
-
 static const struct SCSIBusInfo esp_pci_scsi_info = {
     .tcq = false,
     .max_target = ESP_MAX_DEVS,
     .max_lun = 7,
 
     .transfer_data = esp_transfer_data,
-    .complete = esp_pci_command_complete,
+    .complete = esp_command_complete,
     .cancel = esp_request_cancelled,
 };
 
@@ -386,7 +407,7 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
                           "esp-io", 0x80);
 
     pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
-    s->irq = pci_allocate_irq(dev);
+    s->irq = qemu_allocate_irq(esp_irq_handler, pci, 0);
 
     scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info);
 }
@@ -80,6 +80,7 @@ static void shix_machine_init(MachineClass *mc)
     mc->init = shix_init;
     mc->is_default = true;
     mc->default_cpu_type = TYPE_SH7750R_CPU;
+    mc->deprecation_reason = "old and unmaintained";
 }
 
 DEFINE_MACHINE("shix", shix_machine_init)
@@ -39,13 +39,7 @@
 #include "hw/timer/i8254.h"
 #include "exec/address-spaces.h"
 #include "qom/object.h"
-
-//#define HPET_DEBUG
-#ifdef HPET_DEBUG
-#define DPRINTF printf
-#else
-#define DPRINTF(...)
-#endif
+#include "trace.h"
 
 #define HPET_MSI_SUPPORT        0
 
@@ -431,7 +425,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
     HPETState *s = opaque;
     uint64_t cur_tick, index;
 
-    DPRINTF("qemu: Enter hpet_ram_readl at %" PRIx64 "\n", addr);
+    trace_hpet_ram_read(addr);
     index = addr;
     /*address range of all TN regs*/
     if (index >= 0x100 && index <= 0x3ff) {
@@ -439,7 +433,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
         HPETTimer *timer = &s->timer[timer_id];
 
         if (timer_id > s->num_timers) {
-            DPRINTF("qemu: timer id out of range\n");
+            trace_hpet_timer_id_out_of_range(timer_id);
             return 0;
         }
 
@@ -457,7 +451,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
         case HPET_TN_ROUTE + 4:
             return timer->fsb >> 32;
         default:
-            DPRINTF("qemu: invalid hpet_ram_readl\n");
+            trace_hpet_ram_read_invalid();
             break;
         }
     } else {
@@ -469,7 +463,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
         case HPET_CFG:
            return s->config;
         case HPET_CFG + 4:
-            DPRINTF("qemu: invalid HPET_CFG + 4 hpet_ram_readl\n");
+            trace_hpet_invalid_hpet_cfg(4);
             return 0;
         case HPET_COUNTER:
             if (hpet_enabled(s)) {
@@ -477,7 +471,7 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
             } else {
                 cur_tick = s->hpet_counter;
             }
-            DPRINTF("qemu: reading counter = %" PRIx64 "\n", cur_tick);
+            trace_hpet_ram_read_reading_counter(0, cur_tick);
             return cur_tick;
         case HPET_COUNTER + 4:
             if (hpet_enabled(s)) {
@@ -485,12 +479,12 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr,
             } else {
                 cur_tick = s->hpet_counter;
             }
-            DPRINTF("qemu: reading counter + 4 = %" PRIx64 "\n", cur_tick);
+            trace_hpet_ram_read_reading_counter(4, cur_tick);
             return cur_tick >> 32;
         case HPET_STATUS:
             return s->isr;
         default:
-            DPRINTF("qemu: invalid hpet_ram_readl\n");
+            trace_hpet_ram_read_invalid();
             break;
         }
     }
@@ -504,8 +498,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
     HPETState *s = opaque;
     uint64_t old_val, new_val, val, index;
 
-    DPRINTF("qemu: Enter hpet_ram_writel at %" PRIx64 " = 0x%" PRIx64 "\n",
-            addr, value);
+    trace_hpet_ram_write(addr, value);
     index = addr;
     old_val = hpet_ram_read(opaque, addr, 4);
     new_val = value;
@@ -515,14 +508,14 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
         uint8_t timer_id = (addr - 0x100) / 0x20;
         HPETTimer *timer = &s->timer[timer_id];
 
-        DPRINTF("qemu: hpet_ram_writel timer_id = 0x%x\n", timer_id);
+        trace_hpet_ram_write_timer_id(timer_id);
         if (timer_id > s->num_timers) {
-            DPRINTF("qemu: timer id out of range\n");
+            trace_hpet_timer_id_out_of_range(timer_id);
             return;
         }
         switch ((addr - 0x100) % 0x20) {
         case HPET_TN_CFG:
-            DPRINTF("qemu: hpet_ram_writel HPET_TN_CFG\n");
+            trace_hpet_ram_write_tn_cfg();
             if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) {
                 update_irq(timer, 0);
             }
@@ -540,10 +533,10 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
             }
             break;
         case HPET_TN_CFG + 4: // Interrupt capabilities
-            DPRINTF("qemu: invalid HPET_TN_CFG+4 write\n");
+            trace_hpet_ram_write_invalid_tn_cfg(4);
             break;
         case HPET_TN_CMP: // comparator register
-            DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP\n");
+            trace_hpet_ram_write_tn_cmp(0);
             if (timer->config & HPET_TN_32BIT) {
                 new_val = (uint32_t)new_val;
             }
@@ -566,7 +559,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
             }
             break;
         case HPET_TN_CMP + 4: // comparator register high order
-            DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP + 4\n");
+            trace_hpet_ram_write_tn_cmp(4);
             if (!timer_is_periodic(timer)
                 || (timer->config & HPET_TN_SETVAL)) {
                 timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32;
@@ -591,7 +584,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
             timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff);
             break;
         default:
-            DPRINTF("qemu: invalid hpet_ram_writel\n");
+            trace_hpet_ram_write_invalid();
             break;
         }
         return;
@@ -631,7 +624,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
         }
         break;
     case HPET_CFG + 4:
-        DPRINTF("qemu: invalid HPET_CFG+4 write\n");
+        trace_hpet_invalid_hpet_cfg(4);
         break;
     case HPET_STATUS:
         val = new_val & s->isr;
@@ -643,24 +636,20 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
         break;
     case HPET_COUNTER:
        if (hpet_enabled(s)) {
-            DPRINTF("qemu: Writing counter while HPET enabled!\n");
+            trace_hpet_ram_write_counter_write_while_enabled();
        }
        s->hpet_counter =
            (s->hpet_counter & 0xffffffff00000000ULL) | value;
-        DPRINTF("qemu: HPET counter written. ctr = 0x%" PRIx64 " -> "
-                "%" PRIx64 "\n", value, s->hpet_counter);
+        trace_hpet_ram_write_counter_written(0, value, s->hpet_counter);
        break;
     case HPET_COUNTER + 4:
-        if (hpet_enabled(s)) {
-            DPRINTF("qemu: Writing counter while HPET enabled!\n");
-        }
+        trace_hpet_ram_write_counter_write_while_enabled();
        s->hpet_counter =
            (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32);
-        DPRINTF("qemu: HPET counter + 4 written. ctr = 0x%" PRIx64 " -> "
-                "%" PRIx64 "\n", value, s->hpet_counter);
+        trace_hpet_ram_write_counter_written(4, value, s->hpet_counter);
        break;
     default:
-        DPRINTF("qemu: invalid hpet_ram_writel\n");
+        trace_hpet_ram_write_invalid();
        break;
     }
 }
@@ -99,3 +99,18 @@ sifive_pwm_write(uint64_t data, uint64_t offset) "Write 0x%" PRIx64 " at address
 sh_timer_start_stop(int enable, int current) "%d (%d)"
 sh_timer_read(uint64_t offset) "tmu012_read 0x%" PRIx64
 sh_timer_write(uint64_t offset, uint64_t value) "tmu012_write 0x%" PRIx64 " 0x%08" PRIx64
+
+# hpet.c
+hpet_timer_id_out_of_range(uint8_t timer_id) "timer id out of range: 0x%" PRIx8
+hpet_invalid_hpet_cfg(uint8_t reg_off) "invalid HPET_CFG + %u" PRIx8
+hpet_ram_read(uint64_t addr) "enter hpet_ram_readl at 0x%" PRIx64
+hpet_ram_read_reading_counter(uint8_t reg_off, uint64_t cur_tick) "reading counter + %" PRIu8 " = 0x%" PRIx64
+hpet_ram_read_invalid(void) "invalid hpet_ram_readl"
+hpet_ram_write(uint64_t addr, uint64_t value) "enter hpet_ram_writel at 0x%" PRIx64 " = 0x%" PRIx64
+hpet_ram_write_timer_id(uint64_t timer_id) "hpet_ram_writel timer_id = 0x%" PRIx64
+hpet_ram_write_tn_cfg(void) "hpet_ram_writel HPET_TN_CFG"
+hpet_ram_write_invalid_tn_cfg(uint8_t reg_off) "invalid HPET_TN_CFG + %" PRIu8 " write"
+hpet_ram_write_tn_cmp(uint8_t reg_off) "hpet_ram_writel HPET_TN_CMP + %" PRIu8
+hpet_ram_write_invalid(void) "invalid hpet_ram_writel"
+hpet_ram_write_counter_write_while_enabled(void) "Writing counter while HPET enabled!"
+hpet_ram_write_counter_written(uint8_t reg_off, uint64_t value, uint64_t counter) "HPET counter + %" PRIu8 " written. crt = 0x%" PRIx64 " -> 0x%" PRIx64
@@ -518,11 +518,6 @@ static inline void tb_set_page_addr1(TranslationBlock *tb,
 uint32_t curr_cflags(CPUState *cpu);
 
 /* TranslationBlock invalidate API */
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(hwaddr addr);
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
-#endif
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
@@ -23,7 +23,6 @@
 
 
 /* translate-all.c */
-void tb_invalidate_phys_page(tb_page_addr_t addr);
 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
 
 #ifdef CONFIG_USER_ONLY
@@ -427,6 +427,16 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd,
             file_size = ph->p_filesz; /* Size of the allocated data */
             data_offset = ph->p_offset; /* Offset where the data is located */
 
+            /*
+             * Some ELF files really do have segments of zero size;
+             * just ignore them rather than trying to set the wrong addr,
+             * or create empty ROM blobs, because the zero-length blob can
+             * falsely trigger the overlapping-ROM-blobs check.
+             */
+            if (mem_size == 0) {
+                continue;
+            }
+
             if (file_size > 0) {
                 if (g_mapped_file_get_length(mapped_file) <
                     file_size + data_offset) {
@@ -530,45 +540,38 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd,
             *pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
         }
 
-        /* Some ELF files really do have segments of zero size;
-         * just ignore them rather than trying to create empty
-         * ROM blobs, because the zero-length blob can falsely
-         * trigger the overlapping-ROM-blobs check.
-         */
-        if (mem_size != 0) {
-            if (load_rom) {
-                g_autofree char *label =
-                    g_strdup_printf("%s ELF program header segment %d",
-                                    name, i);
-
-                /*
-                 * rom_add_elf_program() takes its own reference to
-                 * 'mapped_file'.
-                 */
-                rom_add_elf_program(label, mapped_file, data, file_size,
-                                    mem_size, addr, as);
-            } else {
-                MemTxResult res;
-
-                res = address_space_write(as ? as : &address_space_memory,
-                                          addr, MEMTXATTRS_UNSPECIFIED,
-                                          data, file_size);
-                if (res != MEMTX_OK) {
-                    goto fail;
-                }
-                /*
-                 * We need to zero'ify the space that is not copied
-                 * from file
-                 */
-                if (file_size < mem_size) {
-                    res = address_space_set(as ? as : &address_space_memory,
-                                            addr + file_size, 0,
-                                            mem_size - file_size,
-                                            MEMTXATTRS_UNSPECIFIED);
-                    if (res != MEMTX_OK) {
-                        goto fail;
-                    }
-                }
-            }
-        }
+        if (load_rom) {
+            g_autofree char *label =
+                g_strdup_printf("%s ELF program header segment %d",
+                                name, i);
+
+            /*
+             * rom_add_elf_program() takes its own reference to
+             * 'mapped_file'.
+             */
+            rom_add_elf_program(label, mapped_file, data, file_size,
+                                mem_size, addr, as);
+        } else {
+            MemTxResult res;
+
+            res = address_space_write(as ? as : &address_space_memory,
+                                      addr, MEMTXATTRS_UNSPECIFIED,
+                                      data, file_size);
+            if (res != MEMTX_OK) {
+                goto fail;
+            }
+            /*
+             * We need to zero'ify the space that is not copied
+             * from file
+             */
+            if (file_size < mem_size) {
+                res = address_space_set(as ? as : &address_space_memory,
+                                        addr + file_size, 0,
+                                        mem_size - file_size,
+                                        MEMTXATTRS_UNSPECIFIED);
+                if (res != MEMTX_OK) {
+                    goto fail;
+                }
+            }
+        }
 
@@ -17,18 +17,24 @@ void cpu_timers_init(void);
 
 /* icount - Instruction Counter API */
 
-/*
- * icount enablement state:
+/**
+ * ICountMode: icount enablement state:
  *
- * 0 = Disabled - Do not count executed instructions.
- * 1 = Enabled - Fixed conversion of insn to ns via "shift" option
- * 2 = Enabled - Runtime adaptive algorithm to compute shift
+ * @ICOUNT_DISABLED: Disabled - Do not count executed instructions.
+ * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option
+ * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift
  */
-#ifdef CONFIG_TCG
-extern int use_icount;
+typedef enum {
+    ICOUNT_DISABLED = 0,
+    ICOUNT_PRECISE,
+    ICOUNT_ADAPTATIVE,
+} ICountMode;
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+extern ICountMode use_icount;
 #define icount_enabled() (use_icount)
 #else
-#define icount_enabled() 0
+#define icount_enabled() ICOUNT_DISABLED
 #endif
 
 /*
@@ -50,8 +56,14 @@ int64_t icount_get(void);
  */
 int64_t icount_to_ns(int64_t icount);
 
-/* configure the icount options, including "shift" */
-void icount_configure(QemuOpts *opts, Error **errp);
+/**
+ * icount_configure: configure the icount options, including "shift"
+ * @opts: Options to parse
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Return: true on success, else false setting @errp with error
+ */
+bool icount_configure(QemuOpts *opts, Error **errp);
 
 /* used by tcg vcpu thread to calc icount budget */
 int64_t icount_round(int64_t count);
@@ -1,6 +1,3 @@
-#ifndef SYSEMU_REPLAY_H
-#define SYSEMU_REPLAY_H
-
 /*
  * QEMU replay (system interface)
  *
@@ -11,6 +8,12 @@
  * See the COPYING file in the top-level directory.
  *
  */
+#ifndef SYSEMU_REPLAY_H
+#define SYSEMU_REPLAY_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include this header from user emulation
+#endif
 
 #include "exec/replay-core.h"
 #include "qapi/qapi-types-misc.h"
@@ -84,12 +87,14 @@ int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
 int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount);
 /*! Saves or reads the clock depending on the current replay mode. */
 #define REPLAY_CLOCK(clock, value) \
+    !icount_enabled() ? (value) : \
     (replay_mode == REPLAY_MODE_PLAY \
         ? replay_read_clock((clock), icount_get_raw()) \
         : replay_mode == REPLAY_MODE_RECORD \
             ? replay_save_clock((clock), (value), icount_get_raw()) \
             : (value))
 #define REPLAY_CLOCK_LOCKED(clock, value) \
+    !icount_enabled() ? (value) : \
     (replay_mode == REPLAY_MODE_PLAY \
         ? replay_read_clock((clock), icount_get_raw_locked()) \
         : replay_mode == REPLAY_MODE_RECORD \
@@ -4,37 +4,20 @@
 
 /* icount - Instruction Counter API */
 
-int use_icount;
+ICountMode use_icount = ICOUNT_DISABLED;
 
-void icount_update(CPUState *cpu)
-{
-    abort();
-}
-void icount_configure(QemuOpts *opts, Error **errp)
+bool icount_configure(QemuOpts *opts, Error **errp)
 {
     /* signal error */
     error_setg(errp, "cannot configure icount, TCG support not available");
+
+    return false;
 }
 int64_t icount_get_raw(void)
 {
     abort();
     return 0;
 }
-int64_t icount_get(void)
-{
-    abort();
-    return 0;
-}
-int64_t icount_to_ns(int64_t icount)
-{
-    abort();
-    return 0;
-}
-int64_t icount_round(int64_t count)
-{
-    abort();
-    return 0;
-}
 void icount_start_warp_timer(void)
 {
     abort();
@@ -43,7 +26,7 @@ void icount_account_warp_timer(void)
 {
     abort();
 }
 
 void icount_notify_exit(void)
 {
-    abort();
 }
@@ -154,7 +154,7 @@ static bool adjust_timers_state_needed(void *opaque)
 
 static bool icount_shift_state_needed(void *opaque)
 {
-    return icount_enabled() == 2;
+    return icount_enabled() == ICOUNT_ADAPTATIVE;
 }
 
 /*
@@ -2270,8 +2270,7 @@ static void user_register_global_props(void)
 
 static int do_configure_icount(void *opaque, QemuOpts *opts, Error **errp)
 {
-    icount_configure(opts, errp);
-    return 0;
+    return !icount_configure(opts, errp);
 }
 
 static int accelerator_set_property(void *opaque,
@@ -18,13 +18,8 @@
  */
 
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
 #include "qemu/error-report.h"
-#include "exec/exec-all.h"
-#include "exec/translate-all.h"
-#include "sysemu/tcg.h"
-#include "sysemu/replay.h"
-#include "hw/core/tcg-cpu-ops.h"
 #include "hw/core/cpu.h"
 
 /* Add a watchpoint. */
@@ -103,122 +98,3 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
         }
     }
 }
-
-#ifdef CONFIG_TCG
-
-/*
- * Return true if this watchpoint address matches the specified
- * access (ie the address range covered by the watchpoint overlaps
- * partially or completely with the address range covered by the
- * access).
- */
-static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
-                                              vaddr addr, vaddr len)
-{
-    /*
-     * We know the lengths are non-zero, but a little caution is
-     * required to avoid errors in the case where the range ends
-     * exactly at the top of the address space and so addr + len
-     * wraps round to zero.
-     */
-    vaddr wpend = wp->vaddr + wp->len - 1;
-    vaddr addrend = addr + len - 1;
-
-    return !(addr > wpend || wp->vaddr > addrend);
-}
-
-/* Return flags for watchpoints that match addr + prot. */
-int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
-{
-    CPUWatchpoint *wp;
-    int ret = 0;
-
-    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
-        if (watchpoint_address_matches(wp, addr, len)) {
-            ret |= wp->flags;
-        }
-    }
-    return ret;
-}
-
-/* Generate a debug exception if a watchpoint has been hit. */
-void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
-                          MemTxAttrs attrs, int flags, uintptr_t ra)
-{
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUWatchpoint *wp;
-
-    assert(tcg_enabled());
-    if (cpu->watchpoint_hit) {
-        /*
-         * We re-entered the check after replacing the TB.
-         * Now raise the debug interrupt so that it will
-         * trigger after the current instruction.
-         */
-        bql_lock();
-        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
-        bql_unlock();
-        return;
-    }
-
-    if (cc->tcg_ops->adjust_watchpoint_address) {
-        /* this is currently used only by ARM BE32 */
-        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
-    }
-
-    assert((flags & ~BP_MEM_ACCESS) == 0);
-    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
-        int hit_flags = wp->flags & flags;
-
-        if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
-            if (replay_running_debug()) {
-                /*
-                 * replay_breakpoint reads icount.
-                 * Force recompile to succeed, because icount may
-                 * be read only at the end of the block.
-                 */
-                if (!cpu->neg.can_do_io) {
-                    /* Force execution of one insn next time. */
-                    cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
-                    cpu_loop_exit_restore(cpu, ra);
-                }
-                /*
-                 * Don't process the watchpoints when we are
-                 * in a reverse debugging operation.
-                 */
-                replay_breakpoint();
-                return;
-            }
-
-            wp->flags |= hit_flags << BP_HIT_SHIFT;
-            wp->hitaddr = MAX(addr, wp->vaddr);
-            wp->hitattrs = attrs;
-
-            if (wp->flags & BP_CPU
-                && cc->tcg_ops->debug_check_watchpoint
-                && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
-                wp->flags &= ~BP_WATCHPOINT_HIT;
-                continue;
-            }
-            cpu->watchpoint_hit = wp;
-
-            mmap_lock();
-            /* This call also restores vCPU state */
-            tb_check_watchpoint(cpu, ra);
-            if (wp->flags & BP_STOP_BEFORE_ACCESS) {
-                cpu->exception_index = EXCP_DEBUG;
-                mmap_unlock();
-                cpu_loop_exit(cpu);
-            } else {
-                /* Force execution of one insn next time. */
-                cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
-                mmap_unlock();
-                cpu_loop_exit_noexc(cpu);
-            }
-        } else {
-            wp->flags &= ~BP_WATCHPOINT_HIT;
-        }
-    }
-}
-
-#endif /* CONFIG_TCG */
|
||||
|
|
|
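A standalone toy version of the wraparound-safe overlap test being moved above, with made-up values, for anyone double-checking the endpoint arithmetic:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t vaddr;

    /* Same comparison as watchpoint_address_matches(): inclusive end
     * addresses cannot overflow even when a range ends at the very
     * top of the address space. */
    static bool ranges_overlap_incl(vaddr a, vaddr a_len,
                                    vaddr b, vaddr b_len)
    {
        vaddr a_end = a + a_len - 1;
        vaddr b_end = b + b_len - 1;

        return !(a > b_end || b > a_end);
    }

    int main(void)
    {
        vaddr wp = UINT64_MAX - 3;  /* 4-byte watchpoint at the top */

        assert(ranges_overlap_incl(wp, 4, UINT64_MAX - 1, 2)); /* hit  */
        assert(!ranges_overlap_incl(wp, 4, 0x1000, 4));        /* miss */
        return 0;
    }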
@@ -0,0 +1,32 @@
+/*
+ * QEMU Alpha clock helpers.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "exec/helper-proto.h"
+#include "cpu.h"
+
+uint64_t helper_load_pcc(CPUAlphaState *env)
+{
+#ifndef CONFIG_USER_ONLY
+    /*
+     * In system mode we have access to a decent high-resolution clock.
+     * In order to make OS-level time accounting work with the RPCC,
+     * present it with a well-timed clock fixed at 250MHz.
+     */
+    return (((uint64_t)env->pcc_ofs << 32)
+            | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
+#else
+    /*
+     * In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. Just pass through
+     * the host cpu clock ticks. Also, don't bother taking PCC_OFS into
+     * account.
+     */
+    return (uint32_t)cpu_get_host_ticks();
+#endif
+}
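As a sanity check on the fixed 250 MHz rate: QEMU_CLOCK_VIRTUAL counts nanoseconds, and ">> 2" divides by four, so the counter gains one tick per 4 ns. A minimal standalone check:

    #include <assert.h>
    #include <stdint.h>

    /* One PCC tick per 4 ns of virtual time == 250 MHz. */
    static uint32_t pcc_ticks_from_ns(int64_t ns)
    {
        return (uint32_t)(ns >> 2);
    }

    int main(void)
    {
        /* One virtual second advances the counter by 250 million. */
        assert(pcc_ticks_from_ns(1000000000) == 250000000);
        return 0;
    }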
@@ -4,15 +4,18 @@ alpha_ss.add(files(
   'fpu_helper.c',
   'gdbstub.c',
   'helper.c',
+  'clk_helper.c',
   'int_helper.c',
   'mem_helper.c',
-  'sys_helper.c',
   'translate.c',
   'vax_helper.c',
 ))
 
 alpha_system_ss = ss.source_set()
-alpha_system_ss.add(files('machine.c'))
+alpha_system_ss.add(files(
+  'machine.c',
+  'sys_helper.c',
+))
 
 target_arch += {'alpha': alpha_ss}
 target_system_arch += {'alpha': alpha_system_ss}
@@ -27,23 +27,7 @@
 #include "qemu/timer.h"
 
 
-uint64_t helper_load_pcc(CPUAlphaState *env)
-{
-#ifndef CONFIG_USER_ONLY
-    /* In system mode we have access to a decent high-resolution clock.
-       In order to make OS-level time accounting work with the RPCC,
-       present it with a well-timed clock fixed at 250MHz. */
-    return (((uint64_t)env->pcc_ofs << 32)
-            | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
-#else
-    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. Just pass through the host cpu
-       clock ticks. Also, don't bother taking PCC_OFS into account. */
-    return (uint32_t)cpu_get_host_ticks();
-#endif
-}
-
 /* PALcode support special instructions */
-#ifndef CONFIG_USER_ONLY
 void helper_tbia(CPUAlphaState *env)
 {
     tlb_flush(env_cpu(env));
@@ -89,5 +73,3 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
         timer_del(cpu->alarm_timer);
     }
 }
-
-#endif /* CONFIG_USER_ONLY */
@@ -1796,8 +1796,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     int pagebits;
     Error *local_err = NULL;
 
-#ifndef CONFIG_USER_ONLY
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
     /* Use pc-relative instructions in system-mode */
     cs->tcg_cflags |= CF_PCREL;
 #endif
 
@@ -948,16 +948,19 @@ static int64_t cycles_ns_per(uint64_t cycles)
 
 static bool instructions_supported(CPUARMState *env)
 {
-    return icount_enabled() == 1; /* Precise instruction counting */
+    /* Precise instruction counting */
+    return icount_enabled() == ICOUNT_PRECISE;
 }
 
 static uint64_t instructions_get_count(CPUARMState *env)
 {
+    assert(icount_enabled() == ICOUNT_PRECISE);
     return (uint64_t)icount_get_raw();
 }
 
 static int64_t instructions_ns_per(uint64_t icount)
 {
+    assert(icount_enabled() == ICOUNT_PRECISE);
     return icount_to_ns((int64_t)icount);
 }
 #endif
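Because ICOUNT_DISABLED is zero, the same accessor supports both the boolean style and the exact-mode style of test; a sketch (icount_enabled() is the existing accessor, the helpers are illustrative):

    /* Any icount mode at all. */
    static bool icount_active(void)
    {
        return icount_enabled() != ICOUNT_DISABLED;
    }

    /* Only the mode where every executed insn bumps the counter,
     * which is what the PMU instruction counter needs. */
    static bool icount_precise(void)
    {
        return icount_enabled() == ICOUNT_PRECISE;
    }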
@@ -7221,8 +7221,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
     static bool ht_warned;
     unsigned requested_lbr_fmt;
 
-#ifndef CONFIG_USER_ONLY
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
     /* Use pc-relative instructions in system-mode */
     cs->tcg_cflags |= CF_PCREL;
 #endif
 
@@ -126,18 +126,18 @@ static const struct TCGCPUOps x86_tcg_ops = {
 #endif /* !CONFIG_USER_ONLY */
 };
 
-static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
+static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
 {
     /* for x86, all cpus use the same set of operations */
     cc->tcg_ops = &x86_tcg_ops;
 }
 
-static void tcg_cpu_class_init(CPUClass *cc)
+static void x86_tcg_cpu_class_init(CPUClass *cc)
 {
-    cc->init_accel_cpu = tcg_cpu_init_ops;
+    cc->init_accel_cpu = x86_tcg_cpu_init_ops;
 }
 
-static void tcg_cpu_xsave_init(void)
+static void x86_tcg_cpu_xsave_init(void)
 {
 #define XO(bit, field) \
     x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
@@ -159,25 +159,25 @@ static void tcg_cpu_xsave_init(void)
  * TCG-specific defaults that override cpudef models when using TCG.
  * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
  */
-static PropValue tcg_default_props[] = {
+static PropValue x86_tcg_default_props[] = {
     { "vme", "off" },
     { NULL, NULL },
 };
 
-static void tcg_cpu_instance_init(CPUState *cs)
+static void x86_tcg_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
     X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
 
     if (xcc->model) {
         /* Special cases not set in the X86CPUDefinition structs: */
-        x86_cpu_apply_props(cpu, tcg_default_props);
+        x86_cpu_apply_props(cpu, x86_tcg_default_props);
     }
 
-    tcg_cpu_xsave_init();
+    x86_tcg_cpu_xsave_init();
 }
 
-static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
 {
     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
 
|
|||
acc->cpu_target_realize = tcg_cpu_realizefn;
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
acc->cpu_class_init = tcg_cpu_class_init;
|
||||
acc->cpu_instance_init = tcg_cpu_instance_init;
|
||||
acc->cpu_class_init = x86_tcg_cpu_class_init;
|
||||
acc->cpu_instance_init = x86_tcg_cpu_instance_init;
|
||||
}
|
||||
static const TypeInfo tcg_cpu_accel_type_info = {
|
||||
static const TypeInfo x86_tcg_cpu_accel_type_info = {
|
||||
.name = ACCEL_CPU_NAME("tcg"),
|
||||
|
||||
.parent = TYPE_ACCEL_CPU,
|
||||
.class_init = tcg_cpu_accel_class_init,
|
||||
.class_init = x86_tcg_cpu_accel_class_init,
|
||||
.abstract = true,
|
||||
};
|
||||
static void tcg_cpu_accel_register_types(void)
|
||||
static void x86_tcg_cpu_accel_register_types(void)
|
||||
{
|
||||
type_register_static(&tcg_cpu_accel_type_info);
|
||||
type_register_static(&x86_tcg_cpu_accel_type_info);
|
||||
}
|
||||
type_init(tcg_cpu_accel_register_types);
|
||||
type_init(x86_tcg_cpu_accel_register_types);
|
||||
|
|
|
@@ -929,7 +929,7 @@ static bool riscv_cpu_is_vendor(Object *cpu_obj)
  * -> cpu_exec_realizefn()
  *    -> tcg_cpu_realize() (via accel_cpu_common_realize())
  */
-static bool tcg_cpu_realize(CPUState *cs, Error **errp)
+static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
     Error *local_err = NULL;
@@ -1372,7 +1372,7 @@ static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
     return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
 }
 
-static void tcg_cpu_instance_init(CPUState *cs)
+static void riscv_tcg_cpu_instance_init(CPUState *cs)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
     Object *obj = OBJECT(cpu);
@@ -1386,7 +1386,7 @@ static void tcg_cpu_instance_init(CPUState *cs)
     }
 }
 
-static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
+static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
 {
     /*
      * All cpus use the same set of operations.
@@ -1394,30 +1394,30 @@ static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
     cc->tcg_ops = &riscv_tcg_ops;
 }
 
-static void tcg_cpu_class_init(CPUClass *cc)
+static void riscv_tcg_cpu_class_init(CPUClass *cc)
 {
-    cc->init_accel_cpu = tcg_cpu_init_ops;
+    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
 }
 
-static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
 {
     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
 
-    acc->cpu_class_init = tcg_cpu_class_init;
-    acc->cpu_instance_init = tcg_cpu_instance_init;
-    acc->cpu_target_realize = tcg_cpu_realize;
+    acc->cpu_class_init = riscv_tcg_cpu_class_init;
+    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
+    acc->cpu_target_realize = riscv_tcg_cpu_realize;
 }
 
-static const TypeInfo tcg_cpu_accel_type_info = {
+static const TypeInfo riscv_tcg_cpu_accel_type_info = {
     .name = ACCEL_CPU_NAME("tcg"),
 
     .parent = TYPE_ACCEL_CPU,
-    .class_init = tcg_cpu_accel_class_init,
+    .class_init = riscv_tcg_cpu_accel_class_init,
     .abstract = true,
 };
 
-static void tcg_cpu_accel_register_types(void)
+static void riscv_tcg_cpu_accel_register_types(void)
 {
-    type_register_static(&tcg_cpu_accel_type_info);
+    type_register_static(&riscv_tcg_cpu_accel_type_info);
 }
-type_init(tcg_cpu_accel_register_types);
+type_init(riscv_tcg_cpu_accel_register_types);
@@ -233,6 +233,7 @@ static const struct TCGCPUOps xtensa_tcg_ops = {
     .do_interrupt = xtensa_cpu_do_interrupt,
     .do_transaction_failed = xtensa_cpu_do_transaction_failed,
     .do_unaligned_access = xtensa_cpu_do_unaligned_access,
+    .debug_check_breakpoint = xtensa_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
 };
 
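The hook being wired up here is the generic instruction-breakpoint callback from TCGCPUOps; its assumed contract, paraphrased from hw/core/tcg-cpu-ops.h:

    /* Assumed semantics: called when a BP_CPU breakpoint matches the
     * current PC. Returning true delivers the debug exception;
     * returning false suppresses it (for xtensa: when the ibreak is
     * masked by the current interrupt level). */
    bool (*debug_check_breakpoint)(CPUState *cpu);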
@@ -229,6 +229,7 @@ enum {
 #define MAX_NCCOMPARE 3
 #define MAX_TLB_WAY_SIZE 8
 #define MAX_NDBREAK 2
+#define MAX_NIBREAK 2
 #define MAX_NMEMORY 4
 #define MAX_MPU_FOREGROUND_SEGMENTS 32
 
@@ -547,6 +548,8 @@ struct CPUArchState {
 
     /* Watchpoints for DBREAK registers */
     struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
+    /* Breakpoints for IBREAK registers */
+    struct CPUBreakpoint *cpu_breakpoint[MAX_NIBREAK];
 };
 
 /**
@@ -590,6 +593,7 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                       int mmu_idx, MemTxAttrs attrs,
                                       MemTxResult response, uintptr_t retaddr);
 hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+bool xtensa_debug_check_breakpoint(CPUState *cs);
 #endif
 void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 void xtensa_count_regs(const XtensaConfig *config,
@@ -33,27 +33,21 @@
 #include "exec/exec-all.h"
 #include "exec/address-spaces.h"
 
-static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
-{
-    uint32_t paddr;
-    uint32_t page_size;
-    unsigned access;
-    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
-                                       &paddr, &page_size, &access);
-    if (ret == 0) {
-        tb_invalidate_phys_addr(&address_space_memory, paddr,
-                                MEMTXATTRS_UNSPECIFIED);
-    }
-}
-
 void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
 {
+    CPUState *cs = env_cpu(env);
     uint32_t change = v ^ env->sregs[IBREAKENABLE];
     unsigned i;
 
     for (i = 0; i < env->config->nibreak; ++i) {
         if (change & (1 << i)) {
-            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+            if (v & (1 << i)) {
+                cpu_breakpoint_insert(cs, env->sregs[IBREAKA + i],
+                                      BP_CPU, &env->cpu_breakpoint[i]);
+            } else {
+                cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[i]);
+                env->cpu_breakpoint[i] = NULL;
+            }
         }
     }
     env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
@@ -62,12 +56,32 @@ void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
 void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
 {
     if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
-        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
-        tb_invalidate_virtual_addr(env, v);
+        CPUState *cs = env_cpu(env);
+
+        cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[i]);
+        cpu_breakpoint_insert(cs, v, BP_CPU, &env->cpu_breakpoint[i]);
     }
     env->sregs[IBREAKA + i] = v;
 }
 
+bool xtensa_debug_check_breakpoint(CPUState *cs)
+{
+    XtensaCPU *cpu = XTENSA_CPU(cs);
+    CPUXtensaState *env = &cpu->env;
+    unsigned int i;
+
+    if (xtensa_get_cintlevel(env) >= env->config->debug_level) {
+        return false;
+    }
+    for (i = 0; i < env->config->nibreak; ++i) {
+        if (env->sregs[IBREAKENABLE] & (1 << i) &&
+            env->sregs[IBREAKA + i] == env->pc) {
+            return true;
+        }
+    }
+    return false;
+}
+
 static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
                        uint32_t dbreakc)
 {
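For readers unfamiliar with the breakpoint API used above, the assumed signatures (from hw/core/cpu.h):

    /* The insert variant stores the new breakpoint through the
     * out-pointer, so it can later be removed cheaply by reference
     * instead of by address lookup. */
    int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                              CPUBreakpoint **breakpoint);
    void cpu_breakpoint_remove_by_ref(CPUState *cpu,
                                      CPUBreakpoint *breakpoint);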
@@ -231,6 +231,18 @@ void xtensa_breakpoint_handler(CPUState *cs)
             }
             cpu_loop_exit_noexc(cs);
         }
+    } else {
+        if (cpu_breakpoint_test(cs, env->pc, BP_GDB)
+            || !cpu_breakpoint_test(cs, env->pc, BP_CPU)) {
+            return;
+        }
+        if (env->sregs[ICOUNT] == 0xffffffff &&
+            xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+            debug_exception_env(env, DEBUGCAUSE_IC);
+        } else {
+            debug_exception_env(env, DEBUGCAUSE_IB);
+        }
+        cpu_loop_exit_noexc(cs);
     }
 }
 
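The added branch encodes the priority rule that the new test below also asserts: when an ICOUNT expiry and an IBREAK match coincide on the same instruction, ICOUNT wins. A toy restatement (names mirror the diff, not a public API):

    /* DEBUGCAUSE_IC outranks DEBUGCAUSE_IB when both conditions hold. */
    static uint32_t debugcause_for(bool icount_expired, bool ibreak_hit)
    {
        if (icount_expired) {
            return DEBUGCAUSE_IC;
        }
        return ibreak_hit ? DEBUGCAUSE_IB : 0;
    }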
@@ -1123,19 +1123,6 @@ static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
     return xtensa_op0_insn_len(dc, b0);
 }
 
-static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
-{
-    unsigned i;
-
-    for (i = 0; i < dc->config->nibreak; ++i) {
-        if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
-            env->sregs[IBREAKA + i] == dc->pc) {
-            gen_debug_exception(dc, DEBUGCAUSE_IB);
-            break;
-        }
-    }
-}
-
 static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
 {
@@ -1205,10 +1192,6 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         gen_set_label(label);
     }
 
-    if (dc->debug) {
-        gen_ibreak_check(env, dc);
-    }
-
     disas_xtensa_insn(env, dc);
 
     if (dc->icount) {
@@ -129,7 +129,7 @@ test ibreak_remove
 4:
     test_end
 
-test ibreak_priority
+test ibreak_break_priority
     set_vector debug_vector, 2f
     rsil    a2, debug_level - 1
     movi    a2, 1f
@@ -145,6 +145,29 @@ test ibreak_priority
     movi    a3, 0x2
     assert  eq, a2, a3
 test_end
 
+test ibreak_icount_priority
+    set_vector debug_vector, 2f
+    rsil    a2, debug_level - 1
+    movi    a2, 1f
+    wsr     a2, ibreaka0
+    movi    a2, 1
+    wsr     a2, ibreakenable
+    movi    a2, -2
+    wsr     a2, icount
+    movi    a2, 1
+    wsr     a2, icountlevel
+    isync
+    rsil    a2, 0
+    nop
+1:
+    break   0, 0
+    test_fail
+2:
+    rsr     a2, debugcause
+    movi    a3, 0x1
+    assert  eq, a2, a3
+test_end
+
 #endif
 
 test icount
util/async.c
@@ -94,13 +94,15 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
     }
 
     aio_notify(ctx);
-    /*
-     * Workaround for record/replay.
-     * vCPU execution should be suspended when new BH is set.
-     * This is needed to avoid guest timeouts caused
-     * by the long cycles of the execution.
-     */
-    icount_notify_exit();
+    if (unlikely(icount_enabled())) {
+        /*
+         * Workaround for record/replay.
+         * vCPU execution should be suspended when new BH is set.
+         * This is needed to avoid guest timeouts caused
+         * by the long cycles of the execution.
+         */
+        icount_notify_exit();
+    }
 }
 
 /* Only called from aio_bh_poll() and aio_ctx_finalize() */
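Caller-side sketch for context (aio_bh_schedule_oneshot() is the public API that lands in aio_bh_enqueue(); the callback and wrapper are hypothetical):

    #include "qemu/osdep.h"
    #include "block/aio.h"

    static void my_bh_cb(void *opaque)  /* hypothetical callback */
    {
        /* runs once in the context's thread */
    }

    static void kick(AioContext *ctx)
    {
        /* After this change, enqueueing only pokes icount when icount
         * is actually enabled, instead of on every bottom half. */
        aio_bh_schedule_oneshot(ctx, my_bh_cb, NULL);
    }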