mirror of https://github.com/xemu-project/xemu.git
* New KVM PV features (Marcelo, Wanpeng)
* valgrind fixes (Andrey)
* Remove clock reset notifiers (David)
* KConfig and Makefile cleanups (Paolo)
* Replay and icount improvements (Pavel)
* x86 FP fixes (Peter M.)
* TCG locking assertions (Roman)
* x86 support for mmap-ed -kernel/-initrd (Stefano)
* Other cleanups (Wei Yang, Yan Zhao, Tony)
* LSI fix for infinite loop (Prasad)
* ARM migration fix (Catherine)
* AVX512_BF16 feature (Jing)

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJdXDVWAAoJEL/70l94x66DxDgH/0t6vXPomaXPA8D4WjL5o8/3
t+nYXskvrDU84GU/66MtGcFbkcZJuvoBcVOgiPvHeDnQqm0cqucEMoc1yisYF5Jq
6Q6i3A3kYvalhGnyV9ZgqThy9br2qCS9pCvs3dcEFefNgcay+wkz43SZXPPI0rg/
pkWeJ6x3u0SzlwFSMLqhSj3x1pmE8nB3mSGI8DyHUH4ixeJ5se9mr1GiUROe8q7t
EeZxDCArnGFYltZo2HCQ7964pKz59A2xkpPAyy4soHBzBpQFaf2prtVqB/XFjisf
B9worurtBs5/tMDGZyeibTXsT3mfULQXIb11KTabQzZiabMAUheBh5el4DdFsQM=
=B15A
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* New KVM PV features (Marcelo, Wanpeng)
* valgrind fixes (Andrey)
* Remove clock reset notifiers (David)
* KConfig and Makefile cleanups (Paolo)
* Replay and icount improvements (Pavel)
* x86 FP fixes (Peter M.)
* TCG locking assertions (Roman)
* x86 support for mmap-ed -kernel/-initrd (Stefano)
* Other cleanups (Wei Yang, Yan Zhao, Tony)
* LSI fix for infinite loop (Prasad)
* ARM migration fix (Catherine)
* AVX512_BF16 feature (Jing)

# gpg: Signature made Tue 20 Aug 2019 19:00:54 BST
# gpg:                using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (33 commits)
  x86: Intel AVX512_BF16 feature enabling
  scsi: lsi: exit infinite loop while executing script (CVE-2019-12068)
  test-bitmap: test set 1 bit case for bitmap_set
  migration: do not rom_reset() during incoming migration
  HACKING: Document 'struct' keyword usage
  kvm: vmxcap: Enhance with latest features
  cpus-common: nuke finish_safe_work
  icount: remove unnecessary gen_io_end calls
  icount: clean up cpu_can_io at the entry to the block
  replay: rename step-related variables and functions
  replay: refine replay-time module
  replay: fix replay shutdown
  util/qemu-timer: refactor deadline calculation for external timers
  replay: document development rules
  replay: add missing fix for internal function
  timer: last, remove last bits of last
  replay: Remove host_clock_last
  timer: Remove reset notifiers
  mc146818rtc: Remove reset notifiers
  memory: fix race between TCG and accesses to dirty bitmap
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f2cfa1229e
HACKING | 14

@@ -100,7 +100,19 @@ pointer, you're guaranteed that it is used to modify the storage
it points to, or it is aliased to another pointer that is.

2.3. Typedefs
Typedefs are used to eliminate the redundant 'struct' keyword.
Typedefs are used to eliminate the redundant 'struct' keyword, since type
names have a different style than other identifiers ("CamelCase" versus
"snake_case"). Each named struct type should have a CamelCase name and a
corresponding typedef.

Since certain C compilers choke on duplicated typedefs, you should avoid
them and declare a typedef only in one header file. For common types,
you can use "include/qemu/typedefs.h" for example. However, as a matter
of convenience it is also perfectly fine to use forward struct
definitions instead of typedefs in headers and function prototypes; this
avoids problems with duplicated typedefs and reduces the need to include
headers from other headers.

2.4. Reserved namespaces in C and POSIX
Underscore capital, double underscore, and underscore 't' suffixes should be
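A minimal sketch of the convention described in the new HACKING text above, using hypothetical names that are not part of this patch:

/* hypothetical header, e.g. include/hw/foo.h */
/* CamelCase type name, with the typedef declared exactly once */
typedef struct FooDeviceState FooDeviceState;

struct FooDeviceState {
    int irq_level;
};

/* In another header, a forward struct declaration avoids duplicating the
 * typedef and avoids including foo.h just for a prototype:
 *
 *     struct FooDeviceState;
 *     void foo_device_raise_irq(struct FooDeviceState *s);
 */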
@@ -28,6 +28,7 @@ config VHOST_USER

config XEN
    bool
    select FSDEV_9P if VIRTFS

config VIRTFS
    bool
@@ -169,7 +169,6 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
@@ -90,7 +90,6 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
            /* Accept I/O on the last instruction. */
            gen_io_start();
            ops->translate_insn(db, cpu);
            gen_io_end();
        } else {
            ops->translate_insn(db, cpu);
        }
@@ -7431,11 +7431,16 @@ for target in $target_list; do
  target_dir="$target"
  config_target_mak=$target_dir/config-target.mak
  target_name=$(echo $target | cut -d '-' -f 1)
  target_aligned_only="no"
  case "$target_name" in
    alpha|hppa|mips64el|mips64|mipsel|mips|mipsn32|mipsn32el|sh4|sh4eb|sparc|sparc64|sparc32plus|xtensa|xtensaeb)
    target_aligned_only="yes"
    ;;
  esac
  target_bigendian="no"

  case "$target_name" in
    armeb|aarch64_be|hppa|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or1k|ppc|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb)
    target_bigendian=yes
    target_bigendian="yes"
    ;;
  esac
  target_softmmu="no"

@@ -7717,6 +7722,9 @@ fi
  if supported_whpx_target $target; then
    echo "CONFIG_WHPX=y" >> $config_target_mak
  fi
  if test "$target_aligned_only" = "yes" ; then
    echo "TARGET_ALIGNED_ONLY=y" >> $config_target_mak
  fi
  if test "$target_bigendian" = "yes" ; then
    echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
  fi
@@ -69,12 +69,6 @@ static int cpu_get_free_index(void)
    return cpu_index;
}

static void finish_safe_work(CPUState *cpu)
{
    cpu_exec_start(cpu);
    cpu_exec_end(cpu);
}

void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);

@@ -86,8 +80,6 @@ void cpu_list_add(CPUState *cpu)
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    finish_safe_work(cpu);
}

void cpu_list_remove(CPUState *cpu)
cpus.c | 17

@@ -556,7 +556,8 @@ void qtest_clock_warp(int64_t dest)
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                      QEMU_TIMER_ATTR_ALL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,

@@ -616,7 +617,8 @@ void qemu_start_warp_timer(void)

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                          ~QEMU_TIMER_ATTR_EXTERNAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {

@@ -1352,7 +1354,12 @@ static int64_t tcg_get_icount_limit(void)
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /*
         * Include all the timers, because they may need an attention.
         * Too long CPU execution may create unnecessary delay in UI.
         */
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                              QEMU_TIMER_ATTR_ALL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than

@@ -1373,8 +1380,8 @@ static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (use_icount) {
        int64_t deadline =
            qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                      QEMU_TIMER_ATTR_ALL);

        if (deadline == 0) {
            /* Wake up other AioContexts. */
@@ -0,0 +1,46 @@
Record/replay mechanism, that could be enabled through icount mode, expects
the virtual devices to satisfy the following requirements.

The main idea behind this document is that everything that affects
the guest state during execution in icount mode should be deterministic.

Timers
======

All virtual devices should use virtual clock for timers that change the guest
state. Virtual clock is deterministic, therefore such timers are deterministic
too.

Virtual devices can also use realtime clock for the events that do not change
the guest state directly. When the clock ticking should depend on VM execution
speed, use virtual clock with EXTERNAL attribute. It is not deterministic,
but its speed depends on the guest execution. This clock is used by
the virtual devices (e.g., slirp routing device) that lie outside the
replayed guest.

Bottom halves
=============

Bottom half callbacks, that affect the guest state, should be invoked through
replay_bh_schedule_event or replay_bh_schedule_oneshot_event functions.
Their invocations are saved in record mode and synchronized with the existing
log in replay mode.

Saving/restoring the VM state
=============================

All fields in the device state structure (including virtual timers)
should be restored by loadvm to the same values they had before savevm.

Avoid accessing other devices' state, because the order of saving/restoring
is not defined. It means that you should not call functions like
'update_irq' in post_load callback. Save everything explicitly to avoid
the dependencies that may make restoring the VM state non-deterministic.

Stopping the VM
===============

Stopping the guest should not interfere with its state (with the exception
of the network connections, that could be broken by the remote timeouts).
VM can be stopped at any moment of replay by the user. Restarting the VM
after that stop should not break the replay by the unneeded guest state change.
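A rough illustration of the rules in the new document above, as a hedged sketch with hypothetical device names (MyDevState, mydev_update_guest_registers, the tick_timer/period_ns/completion_bh fields are illustrative, not from this series):

/* Deterministic periodic timer for a hypothetical device: it changes
 * guest-visible state, so it must use the virtual clock.
 */
static void mydev_tick(void *opaque)
{
    MyDevState *s = opaque;
    mydev_update_guest_registers(s);      /* modifies guest state */
    timer_mod(s->tick_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->period_ns);
}

/* created on the virtual clock, so record and replay see identical ticks */
s->tick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, mydev_tick, s);

/* A bottom half that affects guest state goes through the replay layer,
 * so its invocation is logged in record mode and synchronized in replay mode.
 */
replay_bh_schedule_event(s->completion_bh);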
exec.c | 31

@@ -197,6 +197,7 @@ typedef struct subpage_t {

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

@@ -905,6 +906,7 @@ void cpu_address_space_init(CPUState *cpu, int asidx,
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }

@@ -3142,6 +3144,35 @@ void address_space_dispatch_free(AddressSpaceDispatch *d)
    g_free(d);
}

static void do_nothing(CPUState *cpu, run_on_cpu_data d)
{
}

static void tcg_log_global_after_sync(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;

    /* Wait for the CPU to end the current TB.  This avoids the following
     * incorrect race:
     *
     *      vCPU                         migration
     * ----------------------  -------------------------
     * TLB check -> slow path
     *   notdirty_mem_write
     *     write to RAM
     *     mark dirty
     *                         clear dirty flag
     * TLB check -> fast path
     *                         read memory
     *   write to RAM
     *
     * by pushing the migration thread's memory read after the vCPU thread has
     * written the memory.
     */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
@@ -1,6 +1,6 @@
# Lots of the fsdev/9pcode is pulled in by vl.c via qemu_fsdev_add.
# only pull in the actual 9p backend if we also enabled virtio or xen.
ifeq ($(call land,$(CONFIG_VIRTFS),$(call lor,$(CONFIG_VIRTIO_9P),$(CONFIG_XEN))),y)
ifeq ($(CONFIG_FSDEV_9P),y)
common-obj-y = qemu-fsdev.o 9p-marshal.o 9p-iov-marshal.o
else
common-obj-y = qemu-fsdev-dummy.o
@@ -1,4 +1,9 @@
config FSDEV_9P
    bool
    depends on VIRTFS

config VIRTIO_9P
    bool
    default y
    depends on VIRTFS && VIRTIO
    select FSDEV_9P
@ -58,6 +58,7 @@
|
|||
#include "exec/address-spaces.h"
|
||||
#include "hw/boards.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "sysemu/runstate.h"
|
||||
|
||||
#include <zlib.h>
|
||||
|
||||
|
@ -838,6 +839,7 @@ struct Rom {
|
|||
int isrom;
|
||||
char *fw_dir;
|
||||
char *fw_file;
|
||||
GMappedFile *mapped_file;
|
||||
|
||||
bool committed;
|
||||
|
||||
|
@ -848,10 +850,25 @@ struct Rom {
|
|||
static FWCfgState *fw_cfg;
|
||||
static QTAILQ_HEAD(, Rom) roms = QTAILQ_HEAD_INITIALIZER(roms);
|
||||
|
||||
/* rom->data must be heap-allocated (do not use with rom_add_elf_program()) */
|
||||
/*
|
||||
* rom->data can be heap-allocated or memory-mapped (e.g. when added with
|
||||
* rom_add_elf_program())
|
||||
*/
|
||||
static void rom_free_data(Rom *rom)
|
||||
{
|
||||
if (rom->mapped_file) {
|
||||
g_mapped_file_unref(rom->mapped_file);
|
||||
rom->mapped_file = NULL;
|
||||
} else {
|
||||
g_free(rom->data);
|
||||
}
|
||||
|
||||
rom->data = NULL;
|
||||
}
|
||||
|
||||
static void rom_free(Rom *rom)
|
||||
{
|
||||
g_free(rom->data);
|
||||
rom_free_data(rom);
|
||||
g_free(rom->path);
|
||||
g_free(rom->name);
|
||||
g_free(rom->fw_dir);
|
||||
|
@ -1058,11 +1075,12 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
|
|||
|
||||
/* This function is specific for elf program because we don't need to allocate
|
||||
* all the rom. We just allocate the first part and the rest is just zeros. This
|
||||
* is why romsize and datasize are different. Also, this function seize the
|
||||
* memory ownership of "data", so we don't have to allocate and copy the buffer.
|
||||
* is why romsize and datasize are different. Also, this function takes its own
|
||||
* reference to "mapped_file", so we don't have to allocate and copy the buffer.
|
||||
*/
|
||||
int rom_add_elf_program(const char *name, void *data, size_t datasize,
|
||||
size_t romsize, hwaddr addr, AddressSpace *as)
|
||||
int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data,
|
||||
size_t datasize, size_t romsize, hwaddr addr,
|
||||
AddressSpace *as)
|
||||
{
|
||||
Rom *rom;
|
||||
|
||||
|
@ -1073,6 +1091,12 @@ int rom_add_elf_program(const char *name, void *data, size_t datasize,
|
|||
rom->romsize = romsize;
|
||||
rom->data = data;
|
||||
rom->as = as;
|
||||
|
||||
if (mapped_file && data) {
|
||||
g_mapped_file_ref(mapped_file);
|
||||
rom->mapped_file = mapped_file;
|
||||
}
|
||||
|
||||
rom_insert(rom);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1091,6 +1115,15 @@ static void rom_reset(void *unused)
|
|||
{
|
||||
Rom *rom;
|
||||
|
||||
/*
|
||||
* We don't need to fill in the RAM with ROM data because we'll fill
|
||||
* the data in during the next incoming migration in all cases. Note
|
||||
* that some of those RAMs can actually be modified by the guest on ARM
|
||||
* so this is probably the only right thing to do here.
|
||||
*/
|
||||
if (runstate_check(RUN_STATE_INMIGRATE))
|
||||
return;
|
||||
|
||||
QTAILQ_FOREACH(rom, &roms, next) {
|
||||
if (rom->fw_file) {
|
||||
continue;
|
||||
|
@ -1107,8 +1140,7 @@ static void rom_reset(void *unused)
|
|||
}
|
||||
if (rom->isrom) {
|
||||
/* rom needs to be written only once */
|
||||
g_free(rom->data);
|
||||
rom->data = NULL;
|
||||
rom_free_data(rom);
|
||||
}
|
||||
/*
|
||||
* The rom loader is really on the same level as firmware in the guest
|
||||
|
|
hw/i386/pc.c | 17
@ -1244,17 +1244,21 @@ static void load_linux(PCMachineState *pcms,
|
|||
|
||||
/* load initrd */
|
||||
if (initrd_filename) {
|
||||
GMappedFile *mapped_file;
|
||||
gsize initrd_size;
|
||||
gchar *initrd_data;
|
||||
GError *gerr = NULL;
|
||||
|
||||
if (!g_file_get_contents(initrd_filename, &initrd_data,
|
||||
&initrd_size, &gerr)) {
|
||||
mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
|
||||
if (!mapped_file) {
|
||||
fprintf(stderr, "qemu: error reading initrd %s: %s\n",
|
||||
initrd_filename, gerr->message);
|
||||
exit(1);
|
||||
}
|
||||
pcms->initrd_mapped_file = mapped_file;
|
||||
|
||||
initrd_data = g_mapped_file_get_contents(mapped_file);
|
||||
initrd_size = g_mapped_file_get_length(mapped_file);
|
||||
initrd_max = pcms->below_4g_mem_size - pcmc->acpi_data_size - 1;
|
||||
if (initrd_size >= initrd_max) {
|
||||
fprintf(stderr, "qemu: initrd is too large, cannot support."
|
||||
|
@ -1381,6 +1385,7 @@ static void load_linux(PCMachineState *pcms,
|
|||
|
||||
/* load initrd */
|
||||
if (initrd_filename) {
|
||||
GMappedFile *mapped_file;
|
||||
gsize initrd_size;
|
||||
gchar *initrd_data;
|
||||
GError *gerr = NULL;
|
||||
|
@ -1390,12 +1395,16 @@ static void load_linux(PCMachineState *pcms,
|
|||
exit(1);
|
||||
}
|
||||
|
||||
if (!g_file_get_contents(initrd_filename, &initrd_data,
|
||||
&initrd_size, &gerr)) {
|
||||
mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
|
||||
if (!mapped_file) {
|
||||
fprintf(stderr, "qemu: error reading initrd %s: %s\n",
|
||||
initrd_filename, gerr->message);
|
||||
exit(1);
|
||||
}
|
||||
pcms->initrd_mapped_file = mapped_file;
|
||||
|
||||
initrd_data = g_mapped_file_get_contents(mapped_file);
|
||||
initrd_size = g_mapped_file_get_length(mapped_file);
|
||||
if (initrd_size >= initrd_max) {
|
||||
fprintf(stderr, "qemu: initrd is too large, cannot support."
|
||||
"(max: %"PRIu32", need %"PRId64")\n",
|
||||
|
|
|
@ -186,6 +186,9 @@ static const char *names[] = {
|
|||
/* Flag set if this is a tagged command. */
|
||||
#define LSI_TAG_VALID (1 << 16)
|
||||
|
||||
/* Maximum instructions to process. */
|
||||
#define LSI_MAX_INSN 10000
|
||||
|
||||
typedef struct lsi_request {
|
||||
SCSIRequest *req;
|
||||
uint32_t tag;
|
||||
|
@ -1133,7 +1136,21 @@ static void lsi_execute_script(LSIState *s)
|
|||
|
||||
s->istat1 |= LSI_ISTAT1_SRUN;
|
||||
again:
|
||||
insn_processed++;
|
||||
if (++insn_processed > LSI_MAX_INSN) {
|
||||
/* Some windows drivers make the device spin waiting for a memory
|
||||
location to change. If we have been executed a lot of code then
|
||||
assume this is the case and force an unexpected device disconnect.
|
||||
This is apparently sufficient to beat the drivers into submission.
|
||||
*/
|
||||
if (!(s->sien0 & LSI_SIST0_UDC)) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
"lsi_scsi: inf. loop with UDC masked");
|
||||
}
|
||||
lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0);
|
||||
lsi_disconnect(s);
|
||||
trace_lsi_execute_script_stop();
|
||||
return;
|
||||
}
|
||||
insn = read_dword(s, s->dsp);
|
||||
if (!insn) {
|
||||
/* If we receive an empty opcode increment the DSP by 4 bytes
|
||||
|
@ -1570,19 +1587,7 @@ again:
|
|||
}
|
||||
}
|
||||
}
|
||||
if (insn_processed > 10000 && s->waiting == LSI_NOWAIT) {
|
||||
/* Some windows drivers make the device spin waiting for a memory
|
||||
location to change. If we have been executed a lot of code then
|
||||
assume this is the case and force an unexpected device disconnect.
|
||||
This is apparently sufficient to beat the drivers into submission.
|
||||
*/
|
||||
if (!(s->sien0 & LSI_SIST0_UDC)) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
"lsi_scsi: inf. loop with UDC masked");
|
||||
}
|
||||
lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0);
|
||||
lsi_disconnect(s);
|
||||
} else if (s->istat1 & LSI_ISTAT1_SRUN && s->waiting == LSI_NOWAIT) {
|
||||
if (s->istat1 & LSI_ISTAT1_SRUN && s->waiting == LSI_NOWAIT) {
|
||||
if (s->dcntl & LSI_DCNTL_SSM) {
|
||||
lsi_script_dma_interrupt(s, LSI_DSTAT_SSI);
|
||||
} else {
|
||||
|
@ -1970,6 +1975,10 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
|
|||
case 0x2f: /* DSP[24:31] */
|
||||
s->dsp &= 0x00ffffff;
|
||||
s->dsp |= val << 24;
|
||||
/*
|
||||
* FIXME: if s->waiting != LSI_NOWAIT, this will only execute one
|
||||
* instruction. Is this correct?
|
||||
*/
|
||||
if ((s->dmode & LSI_DMODE_MAN) == 0
|
||||
&& (s->istat1 & LSI_ISTAT1_SRUN) == 0)
|
||||
lsi_execute_script(s);
|
||||
|
@ -1988,6 +1997,10 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
|
|||
break;
|
||||
case 0x3b: /* DCNTL */
|
||||
s->dcntl = val & ~(LSI_DCNTL_PFF | LSI_DCNTL_STD);
|
||||
/*
|
||||
* FIXME: if s->waiting != LSI_NOWAIT, this will only execute one
|
||||
* instruction. Is this correct?
|
||||
*/
|
||||
if ((val & LSI_DCNTL_STD) && (s->istat1 & LSI_ISTAT1_SRUN) == 0)
|
||||
lsi_execute_script(s);
|
||||
break;
|
||||
|
|
|
@ -96,7 +96,6 @@ typedef struct RTCState {
|
|||
uint32_t irq_coalesced;
|
||||
uint32_t period;
|
||||
QEMUTimer *coalesced_timer;
|
||||
Notifier clock_reset_notifier;
|
||||
LostTickPolicy lost_tick_policy;
|
||||
Notifier suspend_notifier;
|
||||
QLIST_ENTRY(RTCState) link;
|
||||
|
@ -889,20 +888,6 @@ static const VMStateDescription vmstate_rtc = {
|
|||
}
|
||||
};
|
||||
|
||||
static void rtc_notify_clock_reset(Notifier *notifier, void *data)
|
||||
{
|
||||
RTCState *s = container_of(notifier, RTCState, clock_reset_notifier);
|
||||
int64_t now = *(int64_t *)data;
|
||||
|
||||
rtc_set_date_from_host(ISA_DEVICE(s));
|
||||
periodic_timer_update(s, now, 0);
|
||||
check_update_timer(s);
|
||||
|
||||
if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) {
|
||||
rtc_coalesced_timer_update(s);
|
||||
}
|
||||
}
|
||||
|
||||
/* set CMOS shutdown status register (index 0xF) as S3_resume(0xFE)
|
||||
BIOS will read it and start S3 resume at POST Entry */
|
||||
static void rtc_notify_suspend(Notifier *notifier, void *data)
|
||||
|
@ -988,10 +973,6 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
|
|||
s->update_timer = timer_new_ns(rtc_clock, rtc_update_timer, s);
|
||||
check_update_timer(s);
|
||||
|
||||
s->clock_reset_notifier.notify = rtc_notify_clock_reset;
|
||||
qemu_clock_register_reset_notifier(rtc_clock,
|
||||
&s->clock_reset_notifier);
|
||||
|
||||
s->suspend_notifier.notify = rtc_notify_suspend;
|
||||
qemu_register_suspend_notifier(&s->suspend_notifier);
|
||||
|
||||
|
|
|
@ -7,6 +7,31 @@
|
|||
|
||||
static TCGOp *icount_start_insn;
|
||||
|
||||
static inline void gen_io_start(void)
|
||||
{
|
||||
TCGv_i32 tmp = tcg_const_i32(1);
|
||||
tcg_gen_st_i32(tmp, cpu_env,
|
||||
offsetof(ArchCPU, parent_obj.can_do_io) -
|
||||
offsetof(ArchCPU, env));
|
||||
tcg_temp_free_i32(tmp);
|
||||
}
|
||||
|
||||
/*
|
||||
* cpu->can_do_io is cleared automatically at the beginning of
|
||||
* each translation block. The cost is minimal and only paid
|
||||
* for -icount, plus it would be very easy to forget doing it
|
||||
* in the translator. Therefore, backends only need to call
|
||||
* gen_io_start.
|
||||
*/
|
||||
static inline void gen_io_end(void)
|
||||
{
|
||||
TCGv_i32 tmp = tcg_const_i32(0);
|
||||
tcg_gen_st_i32(tmp, cpu_env,
|
||||
offsetof(ArchCPU, parent_obj.can_do_io) -
|
||||
offsetof(ArchCPU, env));
|
||||
tcg_temp_free_i32(tmp);
|
||||
}
|
||||
|
||||
static inline void gen_tb_start(TranslationBlock *tb)
|
||||
{
|
||||
TCGv_i32 count, imm;
|
||||
|
@ -40,6 +65,7 @@ static inline void gen_tb_start(TranslationBlock *tb)
|
|||
tcg_gen_st16_i32(count, cpu_env,
|
||||
offsetof(ArchCPU, neg.icount_decr.u16.low) -
|
||||
offsetof(ArchCPU, env));
|
||||
gen_io_end();
|
||||
}
|
||||
|
||||
tcg_temp_free_i32(count);
|
||||
|
@ -57,22 +83,4 @@ static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
|
|||
tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
|
||||
}
|
||||
|
||||
static inline void gen_io_start(void)
|
||||
{
|
||||
TCGv_i32 tmp = tcg_const_i32(1);
|
||||
tcg_gen_st_i32(tmp, cpu_env,
|
||||
offsetof(ArchCPU, parent_obj.can_do_io) -
|
||||
offsetof(ArchCPU, env));
|
||||
tcg_temp_free_i32(tmp);
|
||||
}
|
||||
|
||||
static inline void gen_io_end(void)
|
||||
{
|
||||
TCGv_i32 tmp = tcg_const_i32(0);
|
||||
tcg_gen_st_i32(tmp, cpu_env,
|
||||
offsetof(ArchCPU, parent_obj.can_do_io) -
|
||||
offsetof(ArchCPU, env));
|
||||
tcg_temp_free_i32(tmp);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -425,6 +425,7 @@ struct MemoryListener {
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*log_global_after_sync)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,

@@ -1687,6 +1688,17 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read.  If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
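A hedged sketch of the call ordering this new hook implies for a dirty-bitmap reader such as the migration thread (it mirrors the memory.c and migration/ram.c hunks later in this diff; the surrounding steps are paraphrased, not quoted):

/* 1. sync dirty bits from the accelerators into the global bitmap */
memory_global_dirty_log_sync();

/* ... clear the part of the dirty bitmap that is about to be read ... */

/* 2. wait for every vCPU to finish its current TB, so a write that raced
 *    with the clear cannot be missed by the reader
 */
memory_global_after_dirty_log_sync();

/* 3. only now read the dirty pages and copy them out */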
@@ -35,6 +35,7 @@
#pragma GCC poison TARGET_UNICORE32
#pragma GCC poison TARGET_XTENSA

#pragma GCC poison TARGET_ALIGNED_ONLY
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_SUPPORTS_MTTCG
@ -323,8 +323,9 @@ static int glue(load_elf, SZ)(const char *name, int fd,
|
|||
struct elfhdr ehdr;
|
||||
struct elf_phdr *phdr = NULL, *ph;
|
||||
int size, i, total_size;
|
||||
elf_word mem_size, file_size;
|
||||
elf_word mem_size, file_size, data_offset;
|
||||
uint64_t addr, low = (uint64_t)-1, high = 0;
|
||||
GMappedFile *mapped_file = NULL;
|
||||
uint8_t *data = NULL;
|
||||
char label[128];
|
||||
int ret = ELF_LOAD_FAILED;
|
||||
|
@ -409,20 +410,32 @@ static int glue(load_elf, SZ)(const char *name, int fd,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Since we want to be able to modify the mapped buffer, we set the
|
||||
* 'writeble' parameter to 'true'. Modifications to the buffer are not
|
||||
* written back to the file.
|
||||
*/
|
||||
mapped_file = g_mapped_file_new_from_fd(fd, true, NULL);
|
||||
if (!mapped_file) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
total_size = 0;
|
||||
for(i = 0; i < ehdr.e_phnum; i++) {
|
||||
ph = &phdr[i];
|
||||
if (ph->p_type == PT_LOAD) {
|
||||
mem_size = ph->p_memsz; /* Size of the ROM */
|
||||
file_size = ph->p_filesz; /* Size of the allocated data */
|
||||
data = g_malloc0(file_size);
|
||||
if (ph->p_filesz > 0) {
|
||||
if (lseek(fd, ph->p_offset, SEEK_SET) < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if (read(fd, data, file_size) != file_size) {
|
||||
data_offset = ph->p_offset; /* Offset where the data is located */
|
||||
|
||||
if (file_size > 0) {
|
||||
if (g_mapped_file_get_length(mapped_file) <
|
||||
file_size + data_offset) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
data = (uint8_t *)g_mapped_file_get_contents(mapped_file);
|
||||
data += data_offset;
|
||||
}
|
||||
|
||||
/* The ELF spec is somewhat vague about the purpose of the
|
||||
|
@ -513,25 +526,25 @@ static int glue(load_elf, SZ)(const char *name, int fd,
|
|||
*pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr;
|
||||
}
|
||||
|
||||
if (mem_size == 0) {
|
||||
/* Some ELF files really do have segments of zero size;
|
||||
* just ignore them rather than trying to create empty
|
||||
* ROM blobs, because the zero-length blob can falsely
|
||||
* trigger the overlapping-ROM-blobs check.
|
||||
*/
|
||||
g_free(data);
|
||||
} else {
|
||||
/* Some ELF files really do have segments of zero size;
|
||||
* just ignore them rather than trying to create empty
|
||||
* ROM blobs, because the zero-length blob can falsely
|
||||
* trigger the overlapping-ROM-blobs check.
|
||||
*/
|
||||
if (mem_size != 0) {
|
||||
if (load_rom) {
|
||||
snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
|
||||
|
||||
/* rom_add_elf_program() seize the ownership of 'data' */
|
||||
rom_add_elf_program(label, data, file_size, mem_size,
|
||||
addr, as);
|
||||
/*
|
||||
* rom_add_elf_program() takes its own reference to
|
||||
* 'mapped_file'.
|
||||
*/
|
||||
rom_add_elf_program(label, mapped_file, data, file_size,
|
||||
mem_size, addr, as);
|
||||
} else {
|
||||
address_space_write(as ? as : &address_space_memory,
|
||||
addr, MEMTXATTRS_UNSPECIFIED,
|
||||
data, file_size);
|
||||
g_free(data);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -547,14 +560,16 @@ static int glue(load_elf, SZ)(const char *name, int fd,
|
|||
struct elf_note *nhdr = NULL;
|
||||
|
||||
file_size = ph->p_filesz; /* Size of the range of ELF notes */
|
||||
data = g_malloc0(file_size);
|
||||
if (ph->p_filesz > 0) {
|
||||
if (lseek(fd, ph->p_offset, SEEK_SET) < 0) {
|
||||
goto fail;
|
||||
}
|
||||
if (read(fd, data, file_size) != file_size) {
|
||||
data_offset = ph->p_offset; /* Offset where the notes are located */
|
||||
|
||||
if (file_size > 0) {
|
||||
if (g_mapped_file_get_length(mapped_file) <
|
||||
file_size + data_offset) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
data = (uint8_t *)g_mapped_file_get_contents(mapped_file);
|
||||
data += data_offset;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -570,19 +585,17 @@ static int glue(load_elf, SZ)(const char *name, int fd,
|
|||
sizeof(struct elf_note) == sizeof(struct elf64_note);
|
||||
elf_note_fn((void *)nhdr, (void *)&ph->p_align, is64);
|
||||
}
|
||||
g_free(data);
|
||||
data = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
g_free(phdr);
|
||||
if (lowaddr)
|
||||
*lowaddr = (uint64_t)(elf_sword)low;
|
||||
if (highaddr)
|
||||
*highaddr = (uint64_t)(elf_sword)high;
|
||||
return total_size;
|
||||
ret = total_size;
|
||||
fail:
|
||||
g_free(data);
|
||||
g_mapped_file_unref(mapped_file);
|
||||
g_free(phdr);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -41,6 +41,7 @@ struct PCMachineState {
    FWCfgState *fw_cfg;
    qemu_irq *gsi;
    PFlashCFI01 *flash[2];
    GMappedFile *initrd_mapped_file;

    /* Configuration options: */
    uint64_t max_ram_below_4g;
@@ -258,8 +258,9 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
                           FWCfgCallback fw_callback,
                           void *callback_opaque, AddressSpace *as,
                           bool read_only);
int rom_add_elf_program(const char *name, void *data, size_t datasize,
                        size_t romsize, hwaddr addr, AddressSpace *as);
int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data,
                        size_t datasize, size_t romsize, hwaddr addr,
                        AddressSpace *as);
int rom_check_and_register_reset(void);
void rom_set_fw(FWCfgState *f);
void rom_set_order_override(int order);
@@ -62,13 +62,15 @@ typedef enum {
 * The following attributes are available:
 *
 * QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem
 * QEMU_TIMER_ATTR_ALL: mask for all existing attributes
 *
 * Timers with this attribute do not recorded in rr mode, therefore it could be
 * used for the subsystems that operate outside the guest core. Applicable only
 * with virtual clock type.
 */

#define QEMU_TIMER_ATTR_EXTERNAL BIT(0)
#define QEMU_TIMER_ATTR_EXTERNAL ((int)BIT(0))
#define QEMU_TIMER_ATTR_ALL      0xffffffff

typedef struct QEMUTimerList QEMUTimerList;

@@ -177,6 +179,8 @@ bool qemu_clock_use_for_deadline(QEMUClockType type);
/**
 * qemu_clock_deadline_ns_all:
 * @type: the clock type
 * @attr_mask: mask for the timer attributes that are included
 *             in deadline calculation
 *
 * Calculate the deadline across all timer lists associated
 * with a clock (as opposed to just the default one)

@@ -184,7 +188,7 @@ bool qemu_clock_use_for_deadline(QEMUClockType type);
 *
 * Returns: time until expiry in nanoseconds or -1
 */
int64_t qemu_clock_deadline_ns_all(QEMUClockType type);
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask);

/**
 * qemu_clock_get_main_loop_timerlist:

@@ -227,28 +231,6 @@ void qemu_clock_enable(QEMUClockType type, bool enabled);
 */
void qemu_start_warp_timer(void);

/**
 * qemu_clock_register_reset_notifier:
 * @type: the clock type
 * @notifier: the notifier function
 *
 * Register a notifier function to call when the clock
 * concerned is reset.
 */
void qemu_clock_register_reset_notifier(QEMUClockType type,
                                        Notifier *notifier);

/**
 * qemu_clock_unregister_reset_notifier:
 * @type: the clock type
 * @notifier: the notifier function
 *
 * Unregister a notifier function to call when the clock
 * concerned is reset.
 */
void qemu_clock_unregister_reset_notifier(QEMUClockType type,
                                          Notifier *notifier);

/**
 * qemu_clock_run_timers:
 * @type: clock on which to operate

@@ -270,19 +252,6 @@ bool qemu_clock_run_timers(QEMUClockType type);
 */
bool qemu_clock_run_all_timers(void);

/**
 * qemu_clock_get_last:
 *
 * Returns last clock query time.
 */
uint64_t qemu_clock_get_last(QEMUClockType type);
/**
 * qemu_clock_set_last:
 *
 * Sets last clock query time.
 */
void qemu_clock_set_last(QEMUClockType type, uint64_t last);


/*
 * QEMUTimerList
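A short sketch of how callers select timers by attribute with the reworked deadline API; this just mirrors the two usages visible in the cpus.c hunks above and adds nothing beyond them:

/* consider every timer, whatever its attributes */
int64_t deadline_all = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                  QEMU_TIMER_ATTR_ALL);

/* ignore timers that only drive external subsystems (not recorded in
 * record/replay mode), e.g. when arming the icount warp timer
 */
int64_t deadline_guest = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                    ~QEMU_TIMER_ATTR_EXTERNAL);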
@@ -89,7 +89,7 @@ struct TranslationBlock;
 * @do_unassigned_access: Callback for unassigned access handling.
 * (this is deprecated: new targets should use do_transaction_failed instead)
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * the target defines #TARGET_ALIGNED_ONLY.
 * @do_transaction_failed: Callback for handling failed memory transactions
 * (ie bus faults or external aborts; not MMU faults)
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
@@ -29,6 +29,7 @@
#define KVM_FEATURE_PV_TLB_FLUSH       9
#define KVM_FEATURE_ASYNC_PF_VMEXIT    10
#define KVM_FEATURE_PV_SEND_IPI        11
#define KVM_FEATURE_POLL_CONTROL       12

#define KVM_HINTS_REALTIME      0

@@ -47,6 +48,7 @@
#define MSR_KVM_ASYNC_PF_EN  0x4b564d02
#define MSR_KVM_STEAL_TIME   0x4b564d03
#define MSR_KVM_PV_EOI_EN    0x4b564d04
#define MSR_KVM_POLL_CONTROL 0x4b564d05

struct kvm_steal_time {
    uint64_t steal;
@@ -75,7 +75,7 @@ void replay_add_blocker(Error *reason);
/* Processing the instructions */

/*! Returns number of executed instructions. */
uint64_t replay_get_current_step(void);
uint64_t replay_get_current_icount(void);
/*! Returns number of instructions to execute in replay mode. */
int replay_get_instructions(void);
/*! Updates instructions counter in replay mode. */
memory.c | 16

@@ -1942,16 +1942,18 @@ void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;
    hwaddr entry_end = entry->iova + entry->addr_mask;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    assert(entry->iova >= notifier->start && entry_end <= notifier->end);

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {

@@ -2125,9 +2127,12 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,

@@ -2618,6 +2623,11 @@ void memory_global_dirty_log_sync(void)
    memory_region_sync_dirty_bitmap(NULL);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
@@ -1857,6 +1857,7 @@ static void migration_bitmap_sync(RAMState *rs)
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qtest.c | 3

@@ -654,7 +654,8 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
            int ret = qemu_strtoi64(words[1], NULL, 0, &ns);
            g_assert(ret == 0);
        } else {
            ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
            ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                            QEMU_TIMER_ATTR_ALL);
        }
        qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
        qtest_send_prefix(chr);
@@ -124,7 +124,7 @@ void replay_add_event(ReplayAsyncEventKind event_kind,
void replay_bh_schedule_event(QEMUBH *bh)
{
    if (events_enabled) {
        uint64_t id = replay_get_current_step();
        uint64_t id = replay_get_current_icount();
        replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
    } else {
        qemu_bh_schedule(bh);
@ -173,7 +173,7 @@ void replay_fetch_data_kind(void)
|
|||
if (!replay_state.has_unread_data) {
|
||||
replay_state.data_kind = replay_get_byte();
|
||||
if (replay_state.data_kind == EVENT_INSTRUCTION) {
|
||||
replay_state.instructions_count = replay_get_dword();
|
||||
replay_state.instruction_count = replay_get_dword();
|
||||
}
|
||||
replay_check_error();
|
||||
replay_state.has_unread_data = 1;
|
||||
|
@ -227,9 +227,9 @@ void replay_mutex_unlock(void)
|
|||
}
|
||||
}
|
||||
|
||||
void replay_advance_current_step(uint64_t current_step)
|
||||
void replay_advance_current_icount(uint64_t current_icount)
|
||||
{
|
||||
int diff = (int)(replay_get_current_step() - replay_state.current_step);
|
||||
int diff = (int)(current_icount - replay_state.current_icount);
|
||||
|
||||
/* Time can only go forward */
|
||||
assert(diff >= 0);
|
||||
|
@ -237,7 +237,7 @@ void replay_advance_current_step(uint64_t current_step)
|
|||
if (diff > 0) {
|
||||
replay_put_event(EVENT_INSTRUCTION);
|
||||
replay_put_dword(diff);
|
||||
replay_state.current_step += diff;
|
||||
replay_state.current_icount += diff;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -246,6 +246,6 @@ void replay_save_instructions(void)
|
|||
{
|
||||
if (replay_file && replay_mode == REPLAY_MODE_RECORD) {
|
||||
g_assert(replay_mutex_locked());
|
||||
replay_advance_current_step(replay_get_current_step());
|
||||
replay_advance_current_icount(replay_get_current_icount());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -64,10 +64,10 @@ typedef enum ReplayAsyncEventKind ReplayAsyncEventKind;
typedef struct ReplayState {
    /*! Cached clock values. */
    int64_t cached_clock[REPLAY_CLOCK_COUNT];
    /*! Current step - number of processed instructions and timer events. */
    uint64_t current_step;
    /*! Current icount - number of processed instructions. */
    uint64_t current_icount;
    /*! Number of instructions to be executed before other events happen. */
    int instructions_count;
    int instruction_count;
    /*! Type of the currently executed event. */
    unsigned int data_kind;
    /*! Flag which indicates that event is not processed yet. */

@@ -122,8 +122,8 @@ void replay_finish_event(void);
    data_kind variable. */
void replay_fetch_data_kind(void);

/*! Advance replay_state.current_step to the specified value. */
void replay_advance_current_step(uint64_t current_step);
/*! Advance replay_state.current_icount to the specified value. */
void replay_advance_current_icount(uint64_t current_icount);
/*! Saves queued events (like instructions and sound). */
void replay_save_instructions(void);
@ -23,7 +23,6 @@ static int replay_pre_save(void *opaque)
|
|||
{
|
||||
ReplayState *state = opaque;
|
||||
state->file_offset = ftell(replay_file);
|
||||
state->host_clock_last = qemu_clock_get_last(QEMU_CLOCK_HOST);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -33,14 +32,13 @@ static int replay_post_load(void *opaque, int version_id)
|
|||
ReplayState *state = opaque;
|
||||
if (replay_mode == REPLAY_MODE_PLAY) {
|
||||
fseek(replay_file, state->file_offset, SEEK_SET);
|
||||
qemu_clock_set_last(QEMU_CLOCK_HOST, state->host_clock_last);
|
||||
/* If this was a vmstate, saved in recording mode,
|
||||
we need to initialize replay data fields. */
|
||||
replay_fetch_data_kind();
|
||||
} else if (replay_mode == REPLAY_MODE_RECORD) {
|
||||
/* This is only useful for loading the initial state.
|
||||
Therefore reset all the counters. */
|
||||
state->instructions_count = 0;
|
||||
state->instruction_count = 0;
|
||||
state->block_request_id = 0;
|
||||
}
|
||||
|
||||
|
@ -49,19 +47,18 @@ static int replay_post_load(void *opaque, int version_id)
|
|||
|
||||
static const VMStateDescription vmstate_replay = {
|
||||
.name = "replay",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.version_id = 2,
|
||||
.minimum_version_id = 2,
|
||||
.pre_save = replay_pre_save,
|
||||
.post_load = replay_post_load,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_INT64_ARRAY(cached_clock, ReplayState, REPLAY_CLOCK_COUNT),
|
||||
VMSTATE_UINT64(current_step, ReplayState),
|
||||
VMSTATE_INT32(instructions_count, ReplayState),
|
||||
VMSTATE_UINT64(current_icount, ReplayState),
|
||||
VMSTATE_INT32(instruction_count, ReplayState),
|
||||
VMSTATE_UINT32(data_kind, ReplayState),
|
||||
VMSTATE_UINT32(has_unread_data, ReplayState),
|
||||
VMSTATE_UINT64(file_offset, ReplayState),
|
||||
VMSTATE_UINT64(block_request_id, ReplayState),
|
||||
VMSTATE_UINT64(host_clock_last, ReplayState),
|
||||
VMSTATE_INT32(read_event_kind, ReplayState),
|
||||
VMSTATE_UINT64(read_event_id, ReplayState),
|
||||
VMSTATE_INT32(read_event_checkpoint, ReplayState),
|
||||
|
|
|
@ -14,18 +14,19 @@
|
|||
#include "replay-internal.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
int64_t replay_save_clock(ReplayClockKind kind, int64_t clock, int64_t raw_icount)
|
||||
int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
|
||||
int64_t raw_icount)
|
||||
{
|
||||
if (replay_file) {
|
||||
g_assert(replay_mutex_locked());
|
||||
g_assert(replay_file);
|
||||
g_assert(replay_mutex_locked());
|
||||
|
||||
/* Due to the caller's locking requirements we get the icount from it
|
||||
* instead of using replay_save_instructions().
|
||||
*/
|
||||
replay_advance_current_step(raw_icount);
|
||||
replay_put_event(EVENT_CLOCK + kind);
|
||||
replay_put_qword(clock);
|
||||
}
|
||||
/*
|
||||
* Due to the caller's locking requirements we get the icount from it
|
||||
* instead of using replay_save_instructions().
|
||||
*/
|
||||
replay_advance_current_icount(raw_icount);
|
||||
replay_put_event(EVENT_CLOCK + kind);
|
||||
replay_put_qword(clock);
|
||||
|
||||
return clock;
|
||||
}
|
||||
|
@ -47,20 +48,15 @@ void replay_read_next_clock(ReplayClockKind kind)
|
|||
/*! Reads next clock event from the input. */
|
||||
int64_t replay_read_clock(ReplayClockKind kind)
|
||||
{
|
||||
int64_t ret;
|
||||
g_assert(replay_file && replay_mutex_locked());
|
||||
|
||||
replay_account_executed_instructions();
|
||||
|
||||
if (replay_file) {
|
||||
int64_t ret;
|
||||
if (replay_next_event_is(EVENT_CLOCK + kind)) {
|
||||
replay_read_next_clock(kind);
|
||||
}
|
||||
ret = replay_state.cached_clock[kind];
|
||||
|
||||
return ret;
|
||||
if (replay_next_event_is(EVENT_CLOCK + kind)) {
|
||||
replay_read_next_clock(kind);
|
||||
}
|
||||
ret = replay_state.cached_clock[kind];
|
||||
|
||||
error_report("REPLAY INTERNAL ERROR %d", __LINE__);
|
||||
exit(1);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -39,20 +39,20 @@ bool replay_next_event_is(int event)
|
|||
bool res = false;
|
||||
|
||||
/* nothing to skip - not all instructions used */
|
||||
if (replay_state.instructions_count != 0) {
|
||||
if (replay_state.instruction_count != 0) {
|
||||
assert(replay_state.data_kind == EVENT_INSTRUCTION);
|
||||
return event == EVENT_INSTRUCTION;
|
||||
}
|
||||
|
||||
while (true) {
|
||||
if (event == replay_state.data_kind) {
|
||||
unsigned int data_kind = replay_state.data_kind;
|
||||
if (event == data_kind) {
|
||||
res = true;
|
||||
}
|
||||
switch (replay_state.data_kind) {
|
||||
switch (data_kind) {
|
||||
case EVENT_SHUTDOWN ... EVENT_SHUTDOWN_LAST:
|
||||
replay_finish_event();
|
||||
qemu_system_shutdown_request(replay_state.data_kind -
|
||||
EVENT_SHUTDOWN);
|
||||
qemu_system_shutdown_request(data_kind - EVENT_SHUTDOWN);
|
||||
break;
|
||||
default:
|
||||
/* clock, time_t, checkpoint and other events */
|
||||
|
@ -62,7 +62,7 @@ bool replay_next_event_is(int event)
|
|||
return res;
|
||||
}
|
||||
|
||||
uint64_t replay_get_current_step(void)
|
||||
uint64_t replay_get_current_icount(void)
|
||||
{
|
||||
return cpu_get_icount_raw();
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ int replay_get_instructions(void)
|
|||
int res = 0;
|
||||
replay_mutex_lock();
|
||||
if (replay_next_event_is(EVENT_INSTRUCTION)) {
|
||||
res = replay_state.instructions_count;
|
||||
res = replay_state.instruction_count;
|
||||
}
|
||||
replay_mutex_unlock();
|
||||
return res;
|
||||
|
@ -82,16 +82,16 @@ void replay_account_executed_instructions(void)
|
|||
{
|
||||
if (replay_mode == REPLAY_MODE_PLAY) {
|
||||
g_assert(replay_mutex_locked());
|
||||
if (replay_state.instructions_count > 0) {
|
||||
int count = (int)(replay_get_current_step()
|
||||
- replay_state.current_step);
|
||||
if (replay_state.instruction_count > 0) {
|
||||
int count = (int)(replay_get_current_icount()
|
||||
- replay_state.current_icount);
|
||||
|
||||
/* Time can only go forward */
|
||||
assert(count >= 0);
|
||||
|
||||
replay_state.instructions_count -= count;
|
||||
replay_state.current_step += count;
|
||||
if (replay_state.instructions_count == 0) {
|
||||
replay_state.instruction_count -= count;
|
||||
replay_state.current_icount += count;
|
||||
if (replay_state.instruction_count == 0) {
|
||||
assert(replay_state.data_kind == EVENT_INSTRUCTION);
|
||||
replay_finish_event();
|
||||
/* Wake up iothread. This is required because
|
||||
|
@ -273,8 +273,8 @@ static void replay_enable(const char *fname, int mode)
|
|||
replay_mutex_init();
|
||||
|
||||
replay_state.data_kind = -1;
|
||||
replay_state.instructions_count = 0;
|
||||
replay_state.current_step = 0;
|
||||
replay_state.instruction_count = 0;
|
||||
replay_state.current_icount = 0;
|
||||
replay_state.has_unread_data = 0;
|
||||
|
||||
/* skip file header for RECORD and check it for PLAY */
|
||||
|
|
|
@ -178,7 +178,11 @@ controls = [
|
|||
19: 'Conceal non-root operation from PT',
|
||||
20: 'Enable XSAVES/XRSTORS',
|
||||
22: 'Mode-based execute control (XS/XU)',
|
||||
23: 'Sub-page write permissions',
|
||||
24: 'GPA translation for PT',
|
||||
25: 'TSC scaling',
|
||||
26: 'User wait and pause',
|
||||
28: 'ENCLV exiting',
|
||||
},
|
||||
cap_msr = MSR_IA32_VMX_PROCBASED_CTLS2,
|
||||
),
|
||||
|
@ -197,6 +201,7 @@ controls = [
|
|||
22: 'Save VMX-preemption timer value',
|
||||
23: 'Clear IA32_BNDCFGS',
|
||||
24: 'Conceal VM exits from PT',
|
||||
25: 'Clear IA32_RTIT_CTL',
|
||||
},
|
||||
cap_msr = MSR_IA32_VMX_EXIT_CTLS,
|
||||
true_cap_msr = MSR_IA32_VMX_TRUE_EXIT_CTLS,
|
||||
|
@ -214,6 +219,7 @@ controls = [
|
|||
15: 'Load IA32_EFER',
|
||||
16: 'Load IA32_BNDCFGS',
|
||||
17: 'Conceal VM entries from PT',
|
||||
18: 'Load IA32_RTIT_CTL',
|
||||
},
|
||||
cap_msr = MSR_IA32_VMX_ENTRY_CTLS,
|
||||
true_cap_msr = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
|
||||
|
@ -227,6 +233,7 @@ controls = [
|
|||
6: 'HLT activity state',
|
||||
7: 'Shutdown activity state',
|
||||
8: 'Wait-for-SIPI activity state',
|
||||
14: 'PT in VMX operation',
|
||||
15: 'IA32_SMBASE support',
|
||||
(16,24): 'Number of CR3-target values',
|
||||
(25,27): 'MSR-load/store count recommendation',
|
||||
|
@ -249,6 +256,7 @@ controls = [
|
|||
17: '1GB EPT pages',
|
||||
20: 'INVEPT supported',
|
||||
21: 'EPT accessed and dirty flags',
|
||||
22: 'Advanced VM-exit information for EPT violations',
|
||||
25: 'Single-context INVEPT',
|
||||
26: 'All-context INVEPT',
|
||||
32: 'INVVPID supported',
|
||||
|
|
|
@@ -23,8 +23,6 @@
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

#define ALIGNED_ONLY

/* Alpha processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)
@ -1332,7 +1332,6 @@ static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
|
|||
if (use_icount) {
|
||||
gen_io_start();
|
||||
helper(va);
|
||||
gen_io_end();
|
||||
return DISAS_PC_STALE;
|
||||
} else {
|
||||
helper(va);
|
||||
|
@ -2398,7 +2397,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
|
|||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_start();
|
||||
gen_helper_load_pcc(va, cpu_env);
|
||||
gen_io_end();
|
||||
ret = DISAS_PC_STALE;
|
||||
} else {
|
||||
gen_helper_load_pcc(va, cpu_env);
|
||||
|
|
|
@ -1775,7 +1775,6 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
|
|||
|
||||
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
|
||||
/* I/O operations must end the TB here (whether read or write) */
|
||||
gen_io_end();
|
||||
s->base.is_jmp = DISAS_UPDATE;
|
||||
} else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
|
||||
/* We default to ending the TB on a coprocessor register write,
|
||||
|
@ -2084,9 +2083,6 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
|
|||
|
||||
gen_helper_exception_return(cpu_env, dst);
|
||||
tcg_temp_free_i64(dst);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/* Must exit loop to check un-masked IRQs */
|
||||
s->base.is_jmp = DISAS_EXIT;
|
||||
return;
|
||||
|
|
|
@ -3213,9 +3213,6 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_cpsr_write_eret(cpu_env, cpsr);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
tcg_temp_free_i32(cpsr);
|
||||
/* Must exit loop to check un-masked IRQs */
|
||||
s->base.is_jmp = DISAS_EXIT;
|
||||
|
@ -7303,7 +7300,6 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
|
|||
|
||||
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
|
||||
/* I/O operations must end the TB here (whether read or write) */
|
||||
gen_io_end();
|
||||
gen_lookup_tb(s);
|
||||
} else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
|
||||
/* We default to ending the TB on a coprocessor register write,
|
||||
|
@ -9163,9 +9159,6 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_cpsr_write_eret(cpu_env, tmp);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
tcg_temp_free_i32(tmp);
|
||||
/* Must exit loop to check un-masked IRQs */
|
||||
s->base.is_jmp = DISAS_EXIT;
|
||||
|
|
|
@@ -3225,8 +3225,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)

    npc = dc->pc;

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed. */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || !dc->flagx_known
@@ -30,7 +30,6 @@
   basis.  It's probably easier to fall back to a strong memory model.  */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL

#define ALIGNED_ONLY
#define MMU_KERNEL_IDX   0
#define MMU_USER_IDX     3
#define MMU_PHYS_IDX     4
@@ -2161,7 +2161,6 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            gen_io_end();
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
@ -770,6 +770,7 @@ static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
|
|||
/* CPUID_7_0_ECX_OSPKE is dynamic */ \
|
||||
CPUID_7_0_ECX_LA57)
|
||||
#define TCG_7_0_EDX_FEATURES 0
|
||||
#define TCG_7_1_EAX_FEATURES 0
|
||||
#define TCG_APM_FEATURES 0
|
||||
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
|
||||
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
|
||||
|
@ -906,7 +907,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
|
|||
"kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
|
||||
"kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
|
||||
NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
|
||||
NULL, NULL, NULL, NULL,
|
||||
"kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
"kvmclock-stable-bit", NULL, NULL, NULL,
|
||||
|
@ -1095,6 +1096,25 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
|
|||
},
|
||||
.tcg_features = TCG_7_0_EDX_FEATURES,
|
||||
},
|
||||
[FEAT_7_1_EAX] = {
|
||||
.type = CPUID_FEATURE_WORD,
|
||||
.feat_names = {
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, "avx512-bf16", NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
},
|
||||
.cpuid = {
|
||||
.eax = 7,
|
||||
.needs_ecx = true, .ecx = 1,
|
||||
.reg = R_EAX,
|
||||
},
|
||||
.tcg_features = TCG_7_1_EAX_FEATURES,
|
||||
},
|
||||
[FEAT_8000_0007_EDX] = {
|
||||
.type = CPUID_FEATURE_WORD,
|
||||
.feat_names = {
|
||||
|
@@ -4292,13 +4312,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
case 7:
/* Structured Extended Feature Flags Enumeration Leaf */
if (count == 0) {
*eax = 0; /* Maximum ECX value for sub-leaves */
/* Maximum ECX value for sub-leaves */
*eax = env->cpuid_level_func7;
*ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
*ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
*ecx |= CPUID_7_0_ECX_OSPKE;
}
*edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
} else if (count == 1) {
*eax = env->features[FEAT_7_1_EAX];
*ebx = 0;
*ecx = 0;
*edx = 0;
} else {
*eax = 0;
*ebx = 0;
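As context for the leaf-7 handling above (an illustration, not part of the patch): once cpuid_level_func7 advertises at least sub-leaf 1, a guest can probe the new AVX512_BF16 bit with an ordinary CPUID query. A minimal user-space sketch, assuming GCC/Clang's <cpuid.h> helpers and the bit-5 encoding given by CPUID_7_1_EAX_AVX512_BF16 later in this diff:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 7, sub-leaf 0: EAX reports the maximum supported sub-leaf. */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) || eax < 1) {
            puts("CPUID leaf 7 sub-leaf 1 not reported");
            return 0;
        }
        /* Leaf 7, sub-leaf 1: bit 5 of EAX is avx512-bf16. */
        __get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx);
        printf("avx512-bf16: %s\n", (eax & (1u << 5)) ? "yes" : "no");
        return 0;
    }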
@ -4948,6 +4974,11 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
|
|||
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
|
||||
break;
|
||||
}
|
||||
|
||||
if (eax == 7) {
|
||||
x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
|
||||
fi->cpuid.ecx);
|
||||
}
|
||||
}
|
||||
|
||||
/* Calculate XSAVE components based on the configured CPU feature flags */
|
||||
|
@ -5066,6 +5097,7 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
|
|||
x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
|
||||
x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
|
||||
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
|
||||
x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
|
||||
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
|
||||
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
|
||||
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
|
||||
|
@ -5097,6 +5129,9 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
|
|||
}
|
||||
|
||||
/* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
|
||||
if (env->cpuid_level_func7 == UINT32_MAX) {
|
||||
env->cpuid_level_func7 = env->cpuid_min_level_func7;
|
||||
}
|
||||
if (env->cpuid_level == UINT32_MAX) {
|
||||
env->cpuid_level = env->cpuid_min_level;
|
||||
}
|
||||
|
@ -5660,6 +5695,8 @@ static void x86_cpu_initfn(Object *obj)
|
|||
object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
|
||||
object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
|
||||
object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
|
||||
object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
|
||||
&error_abort);
|
||||
object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
|
||||
object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
|
||||
object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
|
||||
|
@ -5868,6 +5905,8 @@ static Property x86_cpu_properties[] = {
|
|||
DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
|
||||
DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
|
||||
DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
|
||||
DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
|
||||
UINT32_MAX),
|
||||
DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
|
||||
DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
|
||||
DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
|
||||
|
|
|
@@ -479,6 +479,7 @@ typedef enum FeatureWord {
FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
@@ -692,6 +693,8 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) /*Core Capability*/
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */

#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) /* AVX512 BFloat16 Instruction */

#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and
do not invalidate cache */
#define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */
@ -1260,6 +1263,7 @@ typedef struct CPUX86State {
|
|||
uint64_t steal_time_msr;
|
||||
uint64_t async_pf_en_msr;
|
||||
uint64_t pv_eoi_en_msr;
|
||||
uint64_t poll_control_msr;
|
||||
|
||||
/* Partition-wide HV MSRs, will be updated only on the first vcpu */
|
||||
uint64_t msr_hv_hypercall;
|
||||
|
@ -1322,6 +1326,10 @@ typedef struct CPUX86State {
|
|||
/* Fields after this point are preserved across CPU reset. */
|
||||
|
||||
/* processor features (e.g. for CPUID insn) */
|
||||
/* Minimum cpuid leaf 7 value */
|
||||
uint32_t cpuid_level_func7;
|
||||
/* Actual cpuid leaf 7 value */
|
||||
uint32_t cpuid_min_level_func7;
|
||||
/* Minimum level/xlevel/xlevel2, based on CPU model + features */
|
||||
uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
|
||||
/* Maximum level/xlevel/xlevel2 value for auto-assignment: */
|
||||
|
|
|
@ -193,6 +193,7 @@ static int kvm_get_tsc(CPUState *cs)
|
|||
return 0;
|
||||
}
|
||||
|
||||
memset(&msr_data, 0, sizeof(msr_data));
|
||||
msr_data.info.nmsrs = 1;
|
||||
msr_data.entries[0].index = MSR_IA32_TSC;
|
||||
env->tsc_valid = !runstate_is_running();
|
||||
|
@ -1500,6 +1501,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
|||
c = &cpuid_data.entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
case 0x7:
|
||||
case 0x14: {
|
||||
uint32_t times;
|
||||
|
||||
|
@@ -1512,7 +1514,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
for (j = 1; j <= times; ++j) {
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
fprintf(stderr, "cpuid_data is full, no space for "
"cpuid(eax:0x14,ecx:0x%x)\n", j);
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
abort();
}
c = &cpuid_data.entries[cpuid_i++];
@ -1709,6 +1711,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
|||
|
||||
if (has_xsave) {
|
||||
env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
|
||||
memset(env->xsave_buf, 0, sizeof(struct kvm_xsave));
|
||||
}
|
||||
|
||||
max_nested_state_len = kvm_max_nested_state_length();
|
||||
|
@ -1785,6 +1788,8 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
|
|||
|
||||
hyperv_x86_synic_reset(cpu);
|
||||
}
|
||||
/* enabled by default */
|
||||
env->poll_control_msr = 1;
|
||||
}
|
||||
|
||||
void kvm_arch_do_init_vcpu(X86CPU *cpu)
|
||||
|
@ -1840,108 +1845,105 @@ static int kvm_get_supported_feature_msrs(KVMState *s)
|
|||
|
||||
static int kvm_get_supported_msrs(KVMState *s)
|
||||
{
|
||||
static int kvm_supported_msrs;
|
||||
int ret = 0;
|
||||
struct kvm_msr_list msr_list, *kvm_msr_list;
|
||||
|
||||
/* first time */
|
||||
if (kvm_supported_msrs == 0) {
|
||||
struct kvm_msr_list msr_list, *kvm_msr_list;
|
||||
/*
|
||||
* Obtain MSR list from KVM. These are the MSRs that we must
|
||||
* save/restore.
|
||||
*/
|
||||
msr_list.nmsrs = 0;
|
||||
ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
|
||||
if (ret < 0 && ret != -E2BIG) {
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* Old kernel modules had a bug and could write beyond the provided
|
||||
* memory. Allocate at least a safe amount of 1K.
|
||||
*/
|
||||
kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
|
||||
msr_list.nmsrs *
|
||||
sizeof(msr_list.indices[0])));
|
||||
|
||||
kvm_supported_msrs = -1;
|
||||
kvm_msr_list->nmsrs = msr_list.nmsrs;
|
||||
ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
|
||||
if (ret >= 0) {
|
||||
int i;
|
||||
|
||||
/* Obtain MSR list from KVM. These are the MSRs that we must
|
||||
* save/restore */
|
||||
msr_list.nmsrs = 0;
|
||||
ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
|
||||
if (ret < 0 && ret != -E2BIG) {
|
||||
return ret;
|
||||
}
|
||||
/* Old kernel modules had a bug and could write beyond the provided
|
||||
memory. Allocate at least a safe amount of 1K. */
|
||||
kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
|
||||
msr_list.nmsrs *
|
||||
sizeof(msr_list.indices[0])));
|
||||
|
||||
kvm_msr_list->nmsrs = msr_list.nmsrs;
|
||||
ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
|
||||
if (ret >= 0) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < kvm_msr_list->nmsrs; i++) {
|
||||
switch (kvm_msr_list->indices[i]) {
|
||||
case MSR_STAR:
|
||||
has_msr_star = true;
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
has_msr_hsave_pa = true;
|
||||
break;
|
||||
case MSR_TSC_AUX:
|
||||
has_msr_tsc_aux = true;
|
||||
break;
|
||||
case MSR_TSC_ADJUST:
|
||||
has_msr_tsc_adjust = true;
|
||||
break;
|
||||
case MSR_IA32_TSCDEADLINE:
|
||||
has_msr_tsc_deadline = true;
|
||||
break;
|
||||
case MSR_IA32_SMBASE:
|
||||
has_msr_smbase = true;
|
||||
break;
|
||||
case MSR_SMI_COUNT:
|
||||
has_msr_smi_count = true;
|
||||
break;
|
||||
case MSR_IA32_MISC_ENABLE:
|
||||
has_msr_misc_enable = true;
|
||||
break;
|
||||
case MSR_IA32_BNDCFGS:
|
||||
has_msr_bndcfgs = true;
|
||||
break;
|
||||
case MSR_IA32_XSS:
|
||||
has_msr_xss = true;
|
||||
break;
|
||||
case HV_X64_MSR_CRASH_CTL:
|
||||
has_msr_hv_crash = true;
|
||||
break;
|
||||
case HV_X64_MSR_RESET:
|
||||
has_msr_hv_reset = true;
|
||||
break;
|
||||
case HV_X64_MSR_VP_INDEX:
|
||||
has_msr_hv_vpindex = true;
|
||||
break;
|
||||
case HV_X64_MSR_VP_RUNTIME:
|
||||
has_msr_hv_runtime = true;
|
||||
break;
|
||||
case HV_X64_MSR_SCONTROL:
|
||||
has_msr_hv_synic = true;
|
||||
break;
|
||||
case HV_X64_MSR_STIMER0_CONFIG:
|
||||
has_msr_hv_stimer = true;
|
||||
break;
|
||||
case HV_X64_MSR_TSC_FREQUENCY:
|
||||
has_msr_hv_frequencies = true;
|
||||
break;
|
||||
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
|
||||
has_msr_hv_reenlightenment = true;
|
||||
break;
|
||||
case MSR_IA32_SPEC_CTRL:
|
||||
has_msr_spec_ctrl = true;
|
||||
break;
|
||||
case MSR_VIRT_SSBD:
|
||||
has_msr_virt_ssbd = true;
|
||||
break;
|
||||
case MSR_IA32_ARCH_CAPABILITIES:
|
||||
has_msr_arch_capabs = true;
|
||||
break;
|
||||
case MSR_IA32_CORE_CAPABILITY:
|
||||
has_msr_core_capabs = true;
|
||||
break;
|
||||
}
|
||||
for (i = 0; i < kvm_msr_list->nmsrs; i++) {
|
||||
switch (kvm_msr_list->indices[i]) {
|
||||
case MSR_STAR:
|
||||
has_msr_star = true;
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
has_msr_hsave_pa = true;
|
||||
break;
|
||||
case MSR_TSC_AUX:
|
||||
has_msr_tsc_aux = true;
|
||||
break;
|
||||
case MSR_TSC_ADJUST:
|
||||
has_msr_tsc_adjust = true;
|
||||
break;
|
||||
case MSR_IA32_TSCDEADLINE:
|
||||
has_msr_tsc_deadline = true;
|
||||
break;
|
||||
case MSR_IA32_SMBASE:
|
||||
has_msr_smbase = true;
|
||||
break;
|
||||
case MSR_SMI_COUNT:
|
||||
has_msr_smi_count = true;
|
||||
break;
|
||||
case MSR_IA32_MISC_ENABLE:
|
||||
has_msr_misc_enable = true;
|
||||
break;
|
||||
case MSR_IA32_BNDCFGS:
|
||||
has_msr_bndcfgs = true;
|
||||
break;
|
||||
case MSR_IA32_XSS:
|
||||
has_msr_xss = true;
|
||||
break;
|
||||
case HV_X64_MSR_CRASH_CTL:
|
||||
has_msr_hv_crash = true;
|
||||
break;
|
||||
case HV_X64_MSR_RESET:
|
||||
has_msr_hv_reset = true;
|
||||
break;
|
||||
case HV_X64_MSR_VP_INDEX:
|
||||
has_msr_hv_vpindex = true;
|
||||
break;
|
||||
case HV_X64_MSR_VP_RUNTIME:
|
||||
has_msr_hv_runtime = true;
|
||||
break;
|
||||
case HV_X64_MSR_SCONTROL:
|
||||
has_msr_hv_synic = true;
|
||||
break;
|
||||
case HV_X64_MSR_STIMER0_CONFIG:
|
||||
has_msr_hv_stimer = true;
|
||||
break;
|
||||
case HV_X64_MSR_TSC_FREQUENCY:
|
||||
has_msr_hv_frequencies = true;
|
||||
break;
|
||||
case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
|
||||
has_msr_hv_reenlightenment = true;
|
||||
break;
|
||||
case MSR_IA32_SPEC_CTRL:
|
||||
has_msr_spec_ctrl = true;
|
||||
break;
|
||||
case MSR_VIRT_SSBD:
|
||||
has_msr_virt_ssbd = true;
|
||||
break;
|
||||
case MSR_IA32_ARCH_CAPABILITIES:
|
||||
has_msr_arch_capabs = true;
|
||||
break;
|
||||
case MSR_IA32_CORE_CAPABILITY:
|
||||
has_msr_core_capabs = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
g_free(kvm_msr_list);
|
||||
}
|
||||
|
||||
g_free(kvm_msr_list);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -2493,6 +2495,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
|
|||
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
|
||||
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
|
||||
}
|
||||
|
||||
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
|
||||
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
|
||||
}
|
||||
|
||||
if (has_architectural_pmu_version > 0) {
|
||||
if (has_architectural_pmu_version > 1) {
|
||||
/* Stop the counter. */
|
||||
|
@ -2878,6 +2885,9 @@ static int kvm_get_msrs(X86CPU *cpu)
|
|||
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
|
||||
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
|
||||
}
|
||||
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
|
||||
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
|
||||
}
|
||||
if (has_architectural_pmu_version > 0) {
|
||||
if (has_architectural_pmu_version > 1) {
|
||||
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
|
||||
|
@ -3112,6 +3122,10 @@ static int kvm_get_msrs(X86CPU *cpu)
|
|||
case MSR_KVM_STEAL_TIME:
|
||||
env->steal_time_msr = msrs[i].data;
|
||||
break;
|
||||
case MSR_KVM_POLL_CONTROL: {
|
||||
env->poll_control_msr = msrs[i].data;
|
||||
break;
|
||||
}
|
||||
case MSR_CORE_PERF_FIXED_CTR_CTRL:
|
||||
env->msr_fixed_ctr_ctrl = msrs[i].data;
|
||||
break;
|
||||
|
@ -3480,6 +3494,7 @@ static int kvm_put_debugregs(X86CPU *cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
memset(&dbgregs, 0, sizeof(dbgregs));
|
||||
for (i = 0; i < 4; i++) {
|
||||
dbgregs.db[i] = env->dr[i];
|
||||
}
|
||||
|
|
|
@@ -437,6 +437,14 @@ static const VMStateDescription vmstate_exception_info = {
}
};

/* Poll control MSR enabled by default */
static bool poll_control_msr_needed(void *opaque)
{
X86CPU *cpu = opaque;

return cpu->env.poll_control_msr != 1;
}

static const VMStateDescription vmstate_steal_time_msr = {
.name = "cpu/steal_time_msr",
.version_id = 1,
@@ -470,6 +478,17 @@ static const VMStateDescription vmstate_pv_eoi_msr = {
}
};

static const VMStateDescription vmstate_poll_control_msr = {
.name = "cpu/poll_control_msr",
.version_id = 1,
.minimum_version_id = 1,
.needed = poll_control_msr_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT64(env.poll_control_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
};

static bool fpop_ip_dp_needed(void *opaque)
{
X86CPU *cpu = opaque;
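A general note on the subsection added above (not from the patch itself): a VMState subsection is only put on the wire when its .needed callback returns true, so poll_control_msr is migrated only when it differs from its reset default of 1 (set in kvm_arch_reset_vcpu earlier in this series), and streams sent to older destinations that do not know the field stay compatible. A hypothetical subsection for some other default-valued field would gate itself the same way:

    /* Hypothetical example, not QEMU code: migrate only when non-default. */
    static bool example_msr_needed(void *opaque)
    {
        X86CPU *cpu = opaque;

        return cpu->env.example_msr != EXAMPLE_MSR_DEFAULT;
    }

The corresponding VMStateDescription would then point its .needed member at this callback, exactly as vmstate_poll_control_msr does above.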
@ -1354,6 +1373,7 @@ VMStateDescription vmstate_x86_cpu = {
|
|||
&vmstate_async_pf_msr,
|
||||
&vmstate_pv_eoi_msr,
|
||||
&vmstate_steal_time_msr,
|
||||
&vmstate_poll_control_msr,
|
||||
&vmstate_fpop_ip_dp,
|
||||
&vmstate_msr_tsc_adjust,
|
||||
&vmstate_msr_tscdeadline,
|
||||
|
|
|
@@ -710,102 +710,134 @@ void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val)
#endif

/* float to integer */

/*
* x86 mandates that we return the indefinite integer value for the result
* of any float-to-integer conversion that raises the 'invalid' exception.
* Wrap the softfloat functions to get this behaviour.
*/
#define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \
static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \
{ \
int oldflags, newflags; \
RETTYPE r; \
\
oldflags = get_float_exception_flags(s); \
set_float_exception_flags(0, s); \
r = FN(a, s); \
newflags = get_float_exception_flags(s); \
if (newflags & float_flag_invalid) { \
r = INDEFVALUE; \
} \
set_float_exception_flags(newflags | oldflags, s); \
return r; \
}

WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN)
WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN)
WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN)
WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN)
WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN)
WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN)
WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN)
WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN)

void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
d->ZMM_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
|
||||
d->ZMM_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
|
||||
d->ZMM_L(2) = float32_to_int32(s->ZMM_S(2), &env->sse_status);
|
||||
d->ZMM_L(3) = float32_to_int32(s->ZMM_S(3), &env->sse_status);
|
||||
d->ZMM_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
|
||||
d->ZMM_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status);
|
||||
d->ZMM_L(2) = x86_float32_to_int32(s->ZMM_S(2), &env->sse_status);
|
||||
d->ZMM_L(3) = x86_float32_to_int32(s->ZMM_S(3), &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
d->ZMM_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
|
||||
d->ZMM_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
|
||||
d->ZMM_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
|
||||
d->ZMM_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status);
|
||||
d->ZMM_Q(1) = 0;
|
||||
}
|
||||
|
||||
void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
|
||||
{
|
||||
d->MMX_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
|
||||
d->MMX_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
|
||||
d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
|
||||
d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
|
||||
{
|
||||
d->MMX_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
|
||||
d->MMX_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
|
||||
d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
|
||||
d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status);
|
||||
}
|
||||
|
||||
int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float32_to_int32(s->ZMM_S(0), &env->sse_status);
|
||||
return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
|
||||
}
|
||||
|
||||
int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float64_to_int32(s->ZMM_D(0), &env->sse_status);
|
||||
return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
|
||||
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float32_to_int64(s->ZMM_S(0), &env->sse_status);
|
||||
return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status);
|
||||
}
|
||||
|
||||
int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float64_to_int64(s->ZMM_D(0), &env->sse_status);
|
||||
return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* float to integer truncated */
|
||||
void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
d->ZMM_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
d->ZMM_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
|
||||
d->ZMM_L(2) = float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status);
|
||||
d->ZMM_L(3) = float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status);
|
||||
d->ZMM_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
d->ZMM_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
|
||||
d->ZMM_L(2) = x86_float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status);
|
||||
d->ZMM_L(3) = x86_float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
d->ZMM_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
d->ZMM_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
|
||||
d->ZMM_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
d->ZMM_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
|
||||
d->ZMM_Q(1) = 0;
|
||||
}
|
||||
|
||||
void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
|
||||
{
|
||||
d->MMX_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
d->MMX_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
|
||||
d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
|
||||
{
|
||||
d->MMX_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
d->MMX_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
|
||||
d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
|
||||
}
|
||||
|
||||
int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
}
|
||||
|
||||
int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
|
||||
}
|
||||
|
||||
int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
|
||||
{
|
||||
return float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
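The WRAP_FLOATCONV change above (part of the x86 FP fixes) enforces the x86 rule that a float-to-integer conversion raising the 'invalid' exception returns the indefinite integer value (INT32_MIN or INT64_MIN). As a rough stand-alone illustration only, in plain C without QEMU's softfloat types and ignoring MXCSR rounding modes, the behaviour being wrapped looks like this:

    #include <stdint.h>
    #include <math.h>

    /* Hypothetical helper, not QEMU code: double -> int32 with x86 semantics,
     * i.e. NaN or out-of-range input yields the indefinite integer. */
    static int32_t cvt_double_to_i32_x86(double a)
    {
        if (isnan(a) || a >= 2147483648.0 || a < -2147483648.0) {
            return INT32_MIN;   /* indefinite integer value */
        }
        return (int32_t)a;      /* in-range values convert normally */
    }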
@ -5381,7 +5381,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_op_mov_reg_v(s, dflag, rm, s->T0);
|
||||
set_cc_op(s, CC_OP_EFLAGS);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -6443,7 +6442,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
|
||||
gen_bpt_io(s, s->tmp2_i32, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -6464,7 +6462,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
|
||||
gen_bpt_io(s, s->tmp2_i32, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -6482,7 +6479,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
|
||||
gen_bpt_io(s, s->tmp2_i32, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -6502,7 +6498,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
|
||||
gen_bpt_io(s, s->tmp2_i32, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -7206,7 +7201,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
}
|
||||
gen_helper_rdtsc(cpu_env);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -7666,7 +7660,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
}
|
||||
gen_helper_rdtscp(cpu_env);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
break;
|
||||
|
@ -8036,9 +8029,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_op_mov_v_reg(s, ot, s->T0, rm);
|
||||
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
|
||||
s->T0);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
gen_jmp_im(s, s->pc - s->cs_base);
|
||||
gen_eob(s);
|
||||
} else {
|
||||
|
|
|
@ -885,9 +885,6 @@ static void dec_wcsr(DisasContext *dc)
|
|||
}
|
||||
gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
|
||||
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
|
||||
if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
dc->is_jmp = DISAS_UPDATE;
|
||||
break;
|
||||
case CSR_IP:
|
||||
|
@ -897,9 +894,6 @@ static void dec_wcsr(DisasContext *dc)
|
|||
}
|
||||
gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
|
||||
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
|
||||
if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
dc->is_jmp = DISAS_UPDATE;
|
||||
break;
|
||||
case CSR_ICC:
|
||||
|
@ -1111,9 +1105,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
|
|||
&& (dc->pc - page_start < TARGET_PAGE_SIZE)
|
||||
&& num_insns < max_insns);
|
||||
|
||||
if (tb_cflags(tb) & CF_LAST_IO) {
|
||||
gen_io_end();
|
||||
}
|
||||
|
||||
if (unlikely(cs->singlestep_enabled)) {
|
||||
if (dc->is_jmp == DISAS_NEXT) {
|
||||
|
|
|
@ -1724,8 +1724,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
|
|||
npc = dc->jmp_pc;
|
||||
}
|
||||
|
||||
if (tb_cflags(tb) & CF_LAST_IO)
|
||||
gen_io_end();
|
||||
/* Force an update if the per-tb cpu state has changed. */
|
||||
if (dc->is_jmp == DISAS_NEXT
|
||||
&& (dc->cpustate_changed || org_flags != dc->tb_flags)) {
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
#ifndef MIPS_CPU_H
|
||||
#define MIPS_CPU_H
|
||||
|
||||
#define ALIGNED_ONLY
|
||||
|
||||
#include "cpu-qom.h"
|
||||
#include "exec/cpu-defs.h"
|
||||
#include "fpu/softfloat-types.h"
|
||||
|
|
|
@ -7129,9 +7129,6 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_mfc0_count(arg, cpu_env);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/*
|
||||
* Break the TB to be able to take timer interrupts immediately
|
||||
* after reading count. DISAS_STOP isn't sufficient, we need to
|
||||
|
@ -8296,7 +8293,6 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
|||
|
||||
/* For simplicity assume that all writes can cause interrupts. */
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
/*
|
||||
* DISAS_STOP isn't sufficient, we need to ensure we break out of
|
||||
* translated code to check for pending interrupts.
|
||||
|
@ -8607,9 +8603,6 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_mfc0_count(arg, cpu_env);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/*
|
||||
* Break the TB to be able to take timer interrupts immediately
|
||||
* after reading count. DISAS_STOP isn't sufficient, we need to
|
||||
|
@ -9748,7 +9741,6 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
|||
|
||||
/* For simplicity assume that all writes can cause interrupts. */
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
/*
|
||||
* DISAS_STOP isn't sufficient, we need to ensure we break out of
|
||||
* translated code to check for pending interrupts.
|
||||
|
@ -12817,9 +12809,6 @@ static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_rdhwr_cc(t0, cpu_env);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
gen_store_gpr(t0, rt);
|
||||
/*
|
||||
* Break the TB to be able to take timer interrupts immediately
|
||||
|
|
|
@ -862,10 +862,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
|
|||
!tcg_op_buf_full() &&
|
||||
num_insns < max_insns);
|
||||
|
||||
if (tb_cflags(tb) & CF_LAST_IO) {
|
||||
gen_io_end();
|
||||
}
|
||||
|
||||
/* Indicate where the next block should start */
|
||||
switch (dc->is_jmp) {
|
||||
case DISAS_NEXT:
|
||||
|
|
|
@ -1861,7 +1861,6 @@ static void gen_darn(DisasContext *ctx)
|
|||
gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
|
||||
}
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_stop_exception(ctx);
|
||||
}
|
||||
}
|
||||
|
@ -3991,9 +3990,6 @@ static void gen_rfi(DisasContext *ctx)
|
|||
gen_update_cfar(ctx, ctx->base.pc_next - 4);
|
||||
gen_helper_rfi(cpu_env);
|
||||
gen_sync_exception(ctx);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -4011,9 +4007,6 @@ static void gen_rfid(DisasContext *ctx)
|
|||
gen_update_cfar(ctx, ctx->base.pc_next - 4);
|
||||
gen_helper_rfid(cpu_env);
|
||||
gen_sync_exception(ctx);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -4389,9 +4382,6 @@ static void gen_mtmsrd(DisasContext *ctx)
|
|||
/* Must stop the translation as machine state (may have) changed */
|
||||
/* Note that mtmsr is not always defined as context-synchronizing */
|
||||
gen_stop_exception(ctx);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
}
|
||||
#endif /* !defined(CONFIG_USER_ONLY) */
|
||||
}
|
||||
|
@ -4429,9 +4419,6 @@ static void gen_mtmsr(DisasContext *ctx)
|
|||
tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]);
|
||||
#endif
|
||||
gen_helper_store_msr(cpu_env, msr);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
tcg_temp_free(msr);
|
||||
/* Must stop the translation as machine state (may have) changed */
|
||||
/* Note that mtmsr is not always defined as context-synchronizing */
|
||||
|
|
|
@ -189,7 +189,6 @@ static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
|
|||
}
|
||||
gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_stop_exception(ctx);
|
||||
}
|
||||
}
|
||||
|
@ -201,7 +200,6 @@ static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
|
|||
}
|
||||
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
|
||||
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_stop_exception(ctx);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -511,7 +511,6 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
|
|||
} while (0)
|
||||
|
||||
#define RISCV_OP_CSR_POST do {\
|
||||
gen_io_end(); \
|
||||
gen_set_gpr(a->rd, dest); \
|
||||
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); \
|
||||
exit_tb(ctx); \
|
||||
|
|
|
@ -23,8 +23,6 @@
|
|||
#include "cpu-qom.h"
|
||||
#include "exec/cpu-defs.h"
|
||||
|
||||
#define ALIGNED_ONLY
|
||||
|
||||
/* CPU Subtypes */
|
||||
#define SH_CPU_SH7750 (1 << 0)
|
||||
#define SH_CPU_SH7750S (1 << 1)
|
||||
|
|
|
@ -5,8 +5,6 @@
|
|||
#include "cpu-qom.h"
|
||||
#include "exec/cpu-defs.h"
|
||||
|
||||
#define ALIGNED_ONLY
|
||||
|
||||
#if !defined(TARGET_SPARC64)
|
||||
#define TARGET_DPREGS 16
|
||||
#else
|
||||
|
|
|
@ -4412,10 +4412,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
|
|||
gen_helper_tick_set_limit(r_tickptr,
|
||||
cpu_tick_cmpr);
|
||||
tcg_temp_free_ptr(r_tickptr);
|
||||
if (tb_cflags(dc->base.tb) &
|
||||
CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/* End TB to handle timer interrupt */
|
||||
dc->base.is_jmp = DISAS_EXIT;
|
||||
}
|
||||
|
@ -4440,10 +4436,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
|
|||
gen_helper_tick_set_count(r_tickptr,
|
||||
cpu_tmp0);
|
||||
tcg_temp_free_ptr(r_tickptr);
|
||||
if (tb_cflags(dc->base.tb) &
|
||||
CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/* End TB to handle timer interrupt */
|
||||
dc->base.is_jmp = DISAS_EXIT;
|
||||
}
|
||||
|
@ -4468,10 +4460,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
|
|||
gen_helper_tick_set_limit(r_tickptr,
|
||||
cpu_stick_cmpr);
|
||||
tcg_temp_free_ptr(r_tickptr);
|
||||
if (tb_cflags(dc->base.tb) &
|
||||
CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/* End TB to handle timer interrupt */
|
||||
dc->base.is_jmp = DISAS_EXIT;
|
||||
}
|
||||
|
@ -4588,10 +4576,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
|
|||
gen_helper_tick_set_count(r_tickptr,
|
||||
cpu_tmp0);
|
||||
tcg_temp_free_ptr(r_tickptr);
|
||||
if (tb_cflags(dc->base.tb) &
|
||||
CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
/* End TB to handle timer interrupt */
|
||||
dc->base.is_jmp = DISAS_EXIT;
|
||||
}
|
||||
|
|
|
@ -1931,7 +1931,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
|
|||
code. */
|
||||
cpu_abort(cs, "IO on conditional branch instruction");
|
||||
}
|
||||
gen_io_end();
|
||||
}
|
||||
|
||||
/* At this stage dc->condjmp will only be set when the skipped
|
||||
|
|
|
@ -32,8 +32,6 @@
|
|||
#include "exec/cpu-defs.h"
|
||||
#include "xtensa-isa.h"
|
||||
|
||||
#define ALIGNED_ONLY
|
||||
|
||||
/* Xtensa processors have a weak memory model */
|
||||
#define TCG_GUEST_DEFAULT_MO (0)
|
||||
|
||||
|
|
|
@ -539,9 +539,6 @@ static void gen_waiti(DisasContext *dc, uint32_t imm4)
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_waiti(cpu_env, pc, intlevel);
|
||||
if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
tcg_temp_free(pc);
|
||||
tcg_temp_free(intlevel);
|
||||
}
|
||||
|
@ -2215,9 +2212,6 @@ static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[],
|
|||
}
|
||||
gen_helper_update_ccount(cpu_env);
|
||||
tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
|
||||
if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -2607,9 +2601,6 @@ static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[],
|
|||
tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
|
||||
gen_helper_update_ccompare(cpu_env, tmp);
|
||||
tcg_temp_free(tmp);
|
||||
if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -2621,9 +2612,6 @@ static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[],
|
|||
gen_io_start();
|
||||
}
|
||||
gen_helper_wsr_ccount(cpu_env, arg[0].in);
|
||||
if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -2830,9 +2818,6 @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[],
|
|||
tcg_gen_mov_i32(arg[0].out, tmp);
|
||||
tcg_temp_free(tmp);
|
||||
|
||||
if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@@ -1925,7 +1925,7 @@ static const char * const ldst_name[] =
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
#ifdef TARGET_ALIGNED_ONLY
[MO_UNALN >> MO_ASHIFT] = "un+",
[MO_ALIGN >> MO_ASHIFT] = "",
#else
@@ -333,10 +333,12 @@ typedef enum TCGMemOp {
MO_TE = MO_LE,
#endif

/* MO_UNALN accesses are never checked for alignment.
/*
* MO_UNALN accesses are never checked for alignment.
* MO_ALIGN accesses will result in a call to the CPU's
* do_unaligned_access hook if the guest address is not aligned.
* The default depends on whether the target CPU defines ALIGNED_ONLY.
* The default depends on whether the target CPU defines
* TARGET_ALIGNED_ONLY.
*
* Some architectures (e.g. ARMv8) need the address which is aligned
* to a size more than the size of the memory access.
@@ -353,7 +355,7 @@
*/
MO_ASHIFT = 4,
MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
#ifdef TARGET_ALIGNED_ONLY
MO_ALIGN = 0,
MO_UNALN = MO_AMASK,
#else
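A side note on the flags documented above (illustrative, not part of this patch): whichever way the TARGET_ALIGNED_ONLY default goes, a front end asks for an alignment-checked access by OR-ing MO_ALIGN into the memop passed to the qemu_ld/st generators; ctx->mem_idx below stands in for whatever MMU index a hypothetical translator tracks:

    /* 32-bit target-endian load that must be naturally aligned; a misaligned
     * guest address goes through the CPU's do_unaligned_access hook. */
    tcg_gen_qemu_ld_i32(val, addr, ctx->mem_idx, MO_TEUL | MO_ALIGN);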
@@ -88,9 +88,9 @@ int64_t qemu_clock_get_ns(QEMUClockType type)
return ptimer_test_time_ns;
}

int64_t qemu_clock_deadline_ns_all(QEMUClockType type)
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
{
QEMUTimerList *timer_list = main_loop_tlg.tl[type];
QEMUTimerList *timer_list = main_loop_tlg.tl[QEMU_CLOCK_VIRTUAL];
QEMUTimer *t = timer_list->active_timers.next;
int64_t deadline = -1;
@@ -50,13 +50,15 @@ static void ptimer_test_set_qemu_time_ns(int64_t ns)

static void qemu_clock_step(uint64_t ns)
{
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
QEMU_TIMER_ATTR_ALL);
int64_t advanced_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns;

while (deadline != -1 && deadline <= advanced_time) {
ptimer_test_set_qemu_time_ns(deadline);
ptimer_test_expire_qemu_timers(deadline, QEMU_CLOCK_VIRTUAL);
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
QEMU_TIMER_ATTR_ALL);
}

ptimer_test_set_qemu_time_ns(advanced_time);
@@ -67,6 +67,18 @@ static void bitmap_set_case(bmap_set_func set_func)

bmap = bitmap_new(BMAP_SIZE);

/* Set one bit at offset in second word */
for (offset = 0; offset <= BITS_PER_LONG; offset++) {
bitmap_clear(bmap, 0, BMAP_SIZE);
set_func(bmap, BITS_PER_LONG + offset, 1);
g_assert_cmpint(find_first_bit(bmap, 2 * BITS_PER_LONG),
==, BITS_PER_LONG + offset);
g_assert_cmpint(find_next_zero_bit(bmap,
3 * BITS_PER_LONG,
BITS_PER_LONG + offset),
==, BITS_PER_LONG + offset + 1);
}

/* Both Aligned, set bits [BITS_PER_LONG, 3*BITS_PER_LONG] */
set_func(bmap, BITS_PER_LONG, 2 * BITS_PER_LONG);
g_assert_cmpuint(bmap[1], ==, -1ul);
@@ -444,16 +444,14 @@ static void test_visitor_in_fuzz(TestInputVisitorData *data,
char buf[10000];

for (i = 0; i < 100; i++) {
unsigned int j;
unsigned int j, k;

j = g_test_rand_int_range(0, sizeof(buf) - 1);

buf[j] = '\0';

if (j != 0) {
for (j--; j != 0; j--) {
buf[j - 1] = (char)g_test_rand_int_range(0, 256);
}
for (k = 0; k != j; k++) {
buf[k] = (char)g_test_rand_int_range(0, 256);
}

v = visitor_input_test_init(data, buf);
@ -558,6 +558,8 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
|
|||
BucketType index;
|
||||
int i;
|
||||
|
||||
throttle_config_init(&cfg);
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
BucketType index = to_test[is_ops][i];
|
||||
cfg.buckets[index].avg = avg;
|
||||
|
|
|
@@ -47,9 +47,6 @@ typedef struct QEMUClock {
/* We rely on BQL to protect the timerlists */
QLIST_HEAD(, QEMUTimerList) timerlists;

NotifierList reset_notifiers;
int64_t last;

QEMUClockType type;
bool enabled;
} QEMUClock;
@@ -130,9 +127,7 @@ static void qemu_clock_init(QEMUClockType type, QEMUTimerListNotifyCB *notify_cb

clock->type = type;
clock->enabled = (type == QEMU_CLOCK_VIRTUAL ? false : true);
clock->last = INT64_MIN;
QLIST_INIT(&clock->timerlists);
notifier_list_init(&clock->reset_notifiers);
main_loop_tlg.tl[type] = timerlist_new(type, notify_cb, NULL);
}
@@ -252,14 +247,38 @@ int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
* ignore whether or not the clock should be used in deadline
* calculations.
*/
int64_t qemu_clock_deadline_ns_all(QEMUClockType type)
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
{
int64_t deadline = -1;
int64_t delta;
int64_t expire_time;
QEMUTimer *ts;
QEMUTimerList *timer_list;
QEMUClock *clock = qemu_clock_ptr(type);

if (!clock->enabled) {
return -1;
}

QLIST_FOREACH(timer_list, &clock->timerlists, list) {
deadline = qemu_soonest_timeout(deadline,
timerlist_deadline_ns(timer_list));
qemu_mutex_lock(&timer_list->active_timers_lock);
ts = timer_list->active_timers;
/* Skip all external timers */
while (ts && (ts->attributes & ~attr_mask)) {
ts = ts->next;
}
if (!ts) {
qemu_mutex_unlock(&timer_list->active_timers_lock);
continue;
}
expire_time = ts->expire_time;
qemu_mutex_unlock(&timer_list->active_timers_lock);

delta = expire_time - qemu_clock_get_ns(type);
if (delta <= 0) {
delta = 0;
}
deadline = qemu_soonest_timeout(deadline, delta);
}
return deadline;
}
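One usage note on the rewritten helper above (an illustration, not from the patch): callers now pass an attribute mask, and any timer whose attributes fall outside the mask is skipped when the deadline is computed. The ptimer tests above pass QEMU_TIMER_ATTR_ALL to keep the old behaviour, while the icount/replay path elsewhere in this series is assumed to mask out external timers, roughly:

    /* Consider every timer on the clock (old behaviour): */
    int64_t any = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                             QEMU_TIMER_ATTR_ALL);

    /* Ignore host-driven/external timers, e.g. when advancing icount;
     * QEMU_TIMER_ATTR_EXTERNAL is assumed here from the wider series. */
    int64_t internal = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                  ~QEMU_TIMER_ATTR_EXTERNAL);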
@@ -629,9 +648,6 @@ int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg)

int64_t qemu_clock_get_ns(QEMUClockType type)
{
int64_t now, last;
QEMUClock *clock = qemu_clock_ptr(type);

switch (type) {
case QEMU_CLOCK_REALTIME:
return get_clock();
@@ -643,43 +659,12 @@
return cpu_get_clock();
}
case QEMU_CLOCK_HOST:
now = REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime());
last = clock->last;
clock->last = now;
if (now < last || now > (last + get_max_clock_jump())) {
notifier_list_notify(&clock->reset_notifiers, &now);
}
return now;
return REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime());
case QEMU_CLOCK_VIRTUAL_RT:
return REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT, cpu_get_clock());
}
}

uint64_t qemu_clock_get_last(QEMUClockType type)
{
QEMUClock *clock = qemu_clock_ptr(type);
return clock->last;
}

void qemu_clock_set_last(QEMUClockType type, uint64_t last)
{
QEMUClock *clock = qemu_clock_ptr(type);
clock->last = last;
}

void qemu_clock_register_reset_notifier(QEMUClockType type,
Notifier *notifier)
{
QEMUClock *clock = qemu_clock_ptr(type);
notifier_list_add(&clock->reset_notifiers, notifier);
}

void qemu_clock_unregister_reset_notifier(QEMUClockType type,
Notifier *notifier)
{
notifier_remove(notifier);
}

void init_clocks(QEMUTimerListNotifyCB *notify_cb)
{
QEMUClockType type;