mirror of https://github.com/xemu-project/xemu.git
* Support for jemalloc
* qemu_mutex_lock_iothread "No such process" fix
* cutils: qemu_strto* wrappers
* iohandler.c simplification
* Many other fixes and misc patches.

And some MTTCG work (with Emilio's fixes squashed):
* Signal-free TCG kick
* Removing spinlock in favor of QemuMutex
* User-mode emulation multi-threading fixes/docs

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABCAAGBQJV8Tk7AAoJEL/70l94x66Ds3QH/3bi0RRR2NtKIXAQrGo5tfuD
NPMu1K5Hy+/26AC6mEVNRh4kh7dPH5E4NnDGbxet1+osvmpjxAjc2JrxEybhHD0j
fkpzqynuBN6cA2Gu5GUNoKzxxTmi2RrEYigWDZqCftRXBeO2Hsr1etxJh9UoZw5H
dgpU3j/n0Q8s08jUJ1o789knZI/ckwL4oXK4u2KhSC7ZTCWhJT7Qr7c0JmiKReaF
JEYAsKkQhICVKRVmC8NxML8U58O8maBjQ62UN6nQpVaQd0Yo/6cstFTZsRrHMHL3
7A2Tyg862cMvp+1DOX3Bk02yXA+nxnzLF8kUe0rYo6llqDBDStzqyn1j9R0qeqA=
=nB06
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* Support for jemalloc
* qemu_mutex_lock_iothread "No such process" fix
* cutils: qemu_strto* wrappers
* iohandler.c simplification
* Many other fixes and misc patches.

And some MTTCG work (with Emilio's fixes squashed):
* Signal-free TCG kick
* Removing spinlock in favor of QemuMutex
* User-mode emulation multi-threading fixes/docs

# gpg: Signature made Thu 10 Sep 2015 09:03:07 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream: (44 commits)
  cutils: work around platform differences in strto{l,ul,ll,ull}
  cpu-exec: fix lock hierarchy for user-mode emulation
  exec: make mmap_lock/mmap_unlock globally available
  tcg: comment on which functions have to be called with mmap_lock held
  tcg: add memory barriers in page_find_alloc accesses
  remove unused spinlock.
  replace spinlock by QemuMutex.
  cpus: remove tcg_halt_cond and tcg_cpu_thread globals
  cpus: protect work list with work_mutex
  scripts/dump-guest-memory.py: fix after RAMBlock change
  configure: Add support for jemalloc
  add macro file for coccinelle
  configure: factor out adding disas configure
  vhost-scsi: fix wrong vhost-scsi firmware path
  checkpatch: remove tests that are not relevant outside the kernel
  checkpatch: adapt some tests to QEMU CODING_STYLE
  CODING_STYLE: update mixed declaration rules
  qmp: Add example usage of strto*l() qemu wrapper
  cutils: Add qemu_strtoull() wrapper
  cutils: Add qemu_strtoll() wrapper
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a2aa09e181

CODING_STYLE (13 lines changed):
@@ -87,10 +87,15 @@ Furthermore, it is the QEMU coding style.
 
 5. Declarations
 
-Mixed declarations (interleaving statements and declarations within blocks)
-are not allowed; declarations should be at the beginning of blocks. In other
-words, the code should not generate warnings if using GCC's
--Wdeclaration-after-statement option.
+Mixed declarations (interleaving statements and declarations within
+blocks) are generally not allowed; declarations should be at the beginning
+of blocks.
+
+Every now and then, an exception is made for declarations inside a
+#ifdef or #ifndef block: if the code looks nicer, such declarations can
+be placed at the top of the block even if there are statements above.
+On the other hand, however, it's often best to move that #ifdef/#ifndef
+block to a separate function altogether.
 
 6. Conditional statements
 

@@ -7,7 +7,7 @@ include config-target.mak
 include config-devices.mak
 include $(SRC_PATH)/rules.mak
 
-$(call set-vpath, $(SRC_PATH))
+$(call set-vpath, $(SRC_PATH):$(BUILD_DIR))
 ifdef CONFIG_LINUX
 QEMU_CFLAGS += -I../linux-headers
 endif

@@ -1214,6 +1214,10 @@ static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
 
     if (task == NULL || task->status != SCSI_STATUS_GOOD) {
         error_setg(errp, "iSCSI: failed to send readcapacity10 command.");
+    } else if (!iscsilun->block_size ||
+               iscsilun->block_size % BDRV_SECTOR_SIZE) {
+        error_setg(errp, "iSCSI: the target returned an invalid "
+                   "block size of %d.", iscsilun->block_size);
     }
     if (task) {
         scsi_free_scsi_task(task);

@@ -211,8 +211,6 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                        abi_ulong new_addr);
 int target_msync(abi_ulong start, abi_ulong len, int flags);
 extern unsigned long last_brk;
-void mmap_lock(void);
-void mmap_unlock(void);
 void cpu_list_lock(void);
 void cpu_list_unlock(void);
 #if defined(CONFIG_USE_NPTL)

@@ -337,6 +337,7 @@ libssh2=""
 vhdx=""
 numa=""
 tcmalloc="no"
+jemalloc="no"
 
 # parse CC options first
 for opt do
@@ -1143,6 +1144,10 @@ for opt do
   ;;
   --enable-tcmalloc) tcmalloc="yes"
   ;;
+  --disable-jemalloc) jemalloc="no"
+  ;;
+  --enable-jemalloc) jemalloc="yes"
+  ;;
   *)
       echo "ERROR: unknown option $opt"
       echo "Try '$0 --help' for more information"
@@ -1367,6 +1372,7 @@ disabled with --disable-FEATURE, default is enabled if available:
   vhdx            support for the Microsoft VHDX image format
   numa            libnuma support
   tcmalloc        tcmalloc support
+  jemalloc        jemalloc support
 
 NOTE: The object files are built at the place where configure is launched
 EOF
@@ -3395,6 +3401,11 @@ EOF
   fi
 fi
 
+if test "$tcmalloc" = "yes" && test "$jemalloc" = "yes" ; then
+    echo "ERROR: tcmalloc && jemalloc can't be used at the same time"
+    exit 1
+fi
+
 ##########################################
 # tcmalloc probe
 
@@ -3411,6 +3422,22 @@ EOF
   fi
 fi
 
+##########################################
+# jemalloc probe
+
+if test "$jemalloc" = "yes" ; then
+  cat > $TMPC << EOF
+#include <stdlib.h>
+int main(void) { malloc(1); return 0; }
+EOF
+
+  if compile_prog "" "-ljemalloc" ; then
+    LIBS="-ljemalloc $LIBS"
+  else
+    feature_not_found "jemalloc" "install jemalloc devel"
+  fi
+fi
+
 ##########################################
 # signalfd probe
 signalfd="no"
@@ -4629,6 +4656,7 @@ echo "snappy support    $snappy"
 echo "bzip2 support     $bzip2"
 echo "NUMA host support $numa"
 echo "tcmalloc support  $tcmalloc"
+echo "jemalloc support  $jemalloc"
 
 if test "$sdl_too_old" = "yes"; then
 echo "-> Your SDL version is too old - please upgrade to have SDL support"
@@ -5519,91 +5547,76 @@ fi
 cflags=""
 ldflags=""
 
+disas_config() {
+  echo "CONFIG_${1}_DIS=y" >> $config_target_mak
+  echo "CONFIG_${1}_DIS=y" >> config-all-disas.mak
+}
+
 for i in $ARCH $TARGET_BASE_ARCH ; do
   case "$i" in
   alpha)
-    echo "CONFIG_ALPHA_DIS=y" >> $config_target_mak
-    echo "CONFIG_ALPHA_DIS=y" >> config-all-disas.mak
+    disas_config "ALPHA"
   ;;
   aarch64)
     if test -n "${cxx}"; then
-      echo "CONFIG_ARM_A64_DIS=y" >> $config_target_mak
-      echo "CONFIG_ARM_A64_DIS=y" >> config-all-disas.mak
+      disas_config "ARM_A64"
    fi
  ;;
  arm)
-    echo "CONFIG_ARM_DIS=y" >> $config_target_mak
-    echo "CONFIG_ARM_DIS=y" >> config-all-disas.mak
+    disas_config "ARM"
    if test -n "${cxx}"; then
-      echo "CONFIG_ARM_A64_DIS=y" >> $config_target_mak
-      echo "CONFIG_ARM_A64_DIS=y" >> config-all-disas.mak
+      disas_config "ARM_A64"
    fi
  ;;
  cris)
-    echo "CONFIG_CRIS_DIS=y" >> $config_target_mak
-    echo "CONFIG_CRIS_DIS=y" >> config-all-disas.mak
+    disas_config "CRIS"
  ;;
  hppa)
-    echo "CONFIG_HPPA_DIS=y" >> $config_target_mak
-    echo "CONFIG_HPPA_DIS=y" >> config-all-disas.mak
+    disas_config "HPPA"
  ;;
  i386|x86_64|x32)
-    echo "CONFIG_I386_DIS=y" >> $config_target_mak
-    echo "CONFIG_I386_DIS=y" >> config-all-disas.mak
+    disas_config "I386"
  ;;
  ia64*)
-    echo "CONFIG_IA64_DIS=y" >> $config_target_mak
-    echo "CONFIG_IA64_DIS=y" >> config-all-disas.mak
+    disas_config "IA64"
  ;;
  lm32)
-    echo "CONFIG_LM32_DIS=y" >> $config_target_mak
-    echo "CONFIG_LM32_DIS=y" >> config-all-disas.mak
+    disas_config "LM32"
  ;;
  m68k)
-    echo "CONFIG_M68K_DIS=y" >> $config_target_mak
-    echo "CONFIG_M68K_DIS=y" >> config-all-disas.mak
+    disas_config "M68K"
  ;;
  microblaze*)
-    echo "CONFIG_MICROBLAZE_DIS=y" >> $config_target_mak
-    echo "CONFIG_MICROBLAZE_DIS=y" >> config-all-disas.mak
+    disas_config "MICROBLAZE"
  ;;
  mips*)
-    echo "CONFIG_MIPS_DIS=y" >> $config_target_mak
-    echo "CONFIG_MIPS_DIS=y" >> config-all-disas.mak
+    disas_config "MIPS"
  ;;
  moxie*)
-    echo "CONFIG_MOXIE_DIS=y" >> $config_target_mak
-    echo "CONFIG_MOXIE_DIS=y" >> config-all-disas.mak
+    disas_config "MOXIE"
  ;;
  or32)
-    echo "CONFIG_OPENRISC_DIS=y" >> $config_target_mak
-    echo "CONFIG_OPENRISC_DIS=y" >> config-all-disas.mak
+    disas_config "OPENRISC"
  ;;
  ppc*)
-    echo "CONFIG_PPC_DIS=y" >> $config_target_mak
-    echo "CONFIG_PPC_DIS=y" >> config-all-disas.mak
+    disas_config "PPC"
  ;;
  s390*)
-    echo "CONFIG_S390_DIS=y" >> $config_target_mak
-    echo "CONFIG_S390_DIS=y" >> config-all-disas.mak
+    disas_config "S390"
  ;;
  sh4)
-    echo "CONFIG_SH4_DIS=y" >> $config_target_mak
-    echo "CONFIG_SH4_DIS=y" >> config-all-disas.mak
+    disas_config "SH4"
  ;;
  sparc*)
-    echo "CONFIG_SPARC_DIS=y" >> $config_target_mak
-    echo "CONFIG_SPARC_DIS=y" >> config-all-disas.mak
+    disas_config "SPARC"
  ;;
  xtensa*)
-    echo "CONFIG_XTENSA_DIS=y" >> $config_target_mak
-    echo "CONFIG_XTENSA_DIS=y" >> config-all-disas.mak
+    disas_config "XTENSA"
  ;;
  esac
 done
 if test "$tcg_interpreter" = "yes" ; then
-  echo "CONFIG_TCI_DIS=y" >> $config_target_mak
-  echo "CONFIG_TCI_DIS=y" >> config-all-disas.mak
+  disas_config "TCI"
 fi
 
 case "$ARCH" in

cpu-exec.c (119 lines changed):
@@ -258,10 +258,10 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     tb_free(tb);
 }
 
-static TranslationBlock *tb_find_slow(CPUState *cpu,
-                                      target_ulong pc,
-                                      target_ulong cs_base,
-                                      uint64_t flags)
+static TranslationBlock *tb_find_physical(CPUState *cpu,
+                                          target_ulong pc,
+                                          target_ulong cs_base,
+                                          uint64_t flags)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb, **ptb1;
@@ -278,8 +278,9 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
     ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
     for(;;) {
         tb = *ptb1;
-        if (!tb)
-            goto not_found;
+        if (!tb) {
+            return NULL;
+        }
         if (tb->pc == pc &&
             tb->page_addr[0] == phys_page1 &&
             tb->cs_base == cs_base &&
@@ -291,25 +292,59 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
                 virt_page2 = (pc & TARGET_PAGE_MASK) +
                     TARGET_PAGE_SIZE;
                 phys_page2 = get_page_addr_code(env, virt_page2);
-                if (tb->page_addr[1] == phys_page2)
-                    goto found;
+                if (tb->page_addr[1] == phys_page2) {
+                    break;
+                }
             } else {
-                goto found;
+                break;
             }
         }
         ptb1 = &tb->phys_hash_next;
     }
- not_found:
-   /* if no translated code available, then translate it now */
+
+    /* Move the TB to the head of the list */
+    *ptb1 = tb->phys_hash_next;
+    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
+    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
+    return tb;
+}
+
+static TranslationBlock *tb_find_slow(CPUState *cpu,
+                                      target_ulong pc,
+                                      target_ulong cs_base,
+                                      uint64_t flags)
+{
+    TranslationBlock *tb;
+
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        goto found;
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+     * taken outside tb_lock.  Since we're momentarily dropping
+     * tb_lock, there's a chance that our desired tb has been
+     * translated.
+     */
+    tb_unlock();
+    mmap_lock();
+    tb_lock();
+    tb = tb_find_physical(cpu, pc, cs_base, flags);
+    if (tb) {
+        mmap_unlock();
+        goto found;
+    }
+#endif
+
+    /* if no translated code available, then translate it now */
     tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
 
-found:
-    /* Move the last found TB to the head of the list */
-    if (likely(*ptb1)) {
-        *ptb1 = tb->phys_hash_next;
-        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
-        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
-    }
+#ifdef CONFIG_USER_ONLY
+    mmap_unlock();
+#endif
+
+found:
     /* we add the TB in the virtual pc hash table */
     cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
     return tb;
@@ -350,7 +385,8 @@ static void cpu_handle_debug_exception(CPUState *cpu)
 
 /* main execution loop */
 
-volatile sig_atomic_t exit_request;
+bool exit_request;
+CPUState *tcg_current_cpu;
 
 int cpu_exec(CPUState *cpu)
 {
@@ -365,9 +401,6 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;
 
-    /* This must be volatile so it is not trashed by longjmp() */
-    volatile bool have_tb_lock = false;
-
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
@@ -377,18 +410,10 @@ int cpu_exec(CPUState *cpu)
     }
 
     current_cpu = cpu;
-    /* As long as current_cpu is null, up to the assignment just above,
-     * requests by other threads to exit the execution loop are expected to
-     * be issued using the exit_request global. We must make sure that our
-     * evaluation of the global value is performed past the current_cpu
-     * value transition point, which requires a memory barrier as well as
-     * an instruction scheduling constraint on modern architectures. */
-    smp_mb();
+    atomic_mb_set(&tcg_current_cpu, cpu);
 
     rcu_read_lock();
 
-    if (unlikely(exit_request)) {
+    if (unlikely(atomic_mb_read(&exit_request))) {
         cpu->exit_request = 1;
     }
 
@@ -484,8 +509,7 @@ int cpu_exec(CPUState *cpu)
                 cpu->exception_index = EXCP_INTERRUPT;
                 cpu_loop_exit(cpu);
             }
-            spin_lock(&tcg_ctx.tb_ctx.tb_lock);
-            have_tb_lock = true;
+            tb_lock();
             tb = tb_find_fast(cpu);
             /* Note: we do it here to avoid a gcc bug on Mac OS X when
                doing it in tb_find_slow */
@@ -507,20 +531,14 @@ int cpu_exec(CPUState *cpu)
                 tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                             next_tb & TB_EXIT_MASK, tb);
             }
-            have_tb_lock = false;
-            spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-
-            /* cpu_interrupt might be called while translating the
-               TB, but before it is linked into a potentially
-               infinite loop and becomes env->current_tb.  Avoid
-               starting execution if there is a pending interrupt. */
-            cpu->current_tb = tb;
-            barrier();
+            tb_unlock();
             if (likely(!cpu->exit_request)) {
                 trace_exec_tb(tb, tb->pc);
                 tc_ptr = tb->tc_ptr;
                 /* execute the generated code */
+                cpu->current_tb = tb;
                 next_tb = cpu_tb_exec(cpu, tc_ptr);
+                cpu->current_tb = NULL;
                 switch (next_tb & TB_EXIT_MASK) {
                 case TB_EXIT_REQUESTED:
                     /* Something asked us to stop executing
@@ -528,8 +546,12 @@ int cpu_exec(CPUState *cpu)
                      * loop. Whatever requested the exit will also
                      * have set something else (eg exit_request or
                      * interrupt_request) which we will handle
-                     * next time around the loop.
+                     * next time around the loop.  But we need to
+                     * ensure the tcg_exit_req read in generated code
+                     * comes before the next read of cpu->exit_request
+                     * or cpu->interrupt_request.
                      */
+                    smp_rmb();
                     next_tb = 0;
                     break;
                 case TB_EXIT_ICOUNT_EXPIRED:
@@ -559,7 +581,6 @@ int cpu_exec(CPUState *cpu)
                     break;
                 }
             }
-            cpu->current_tb = NULL;
             /* Try to align the host and virtual clocks
                if the guest is in advance */
             align_clocks(&sc, cpu);
@@ -576,10 +597,7 @@ int cpu_exec(CPUState *cpu)
             x86_cpu = X86_CPU(cpu);
             env = &x86_cpu->env;
 #endif
-            if (have_tb_lock) {
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = false;
-            }
+            tb_lock_reset();
         }
     } /* for(;;) */
 
@@ -588,5 +606,8 @@ int cpu_exec(CPUState *cpu)
 
     /* fail safe : never use current_cpu outside cpu_exec() */
     current_cpu = NULL;
+
+    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
+    atomic_set(&tcg_current_cpu, NULL);
     return ret;
 }
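The tb_find_physical()/tb_find_slow() split and the tb_unlock()/mmap_lock()/tb_lock() sequence above enforce a lock-ordering rule: mmap_lock must be taken outside tb_lock, so on a lookup miss the fine-grained lock is dropped, both locks are reacquired in the documented order, and the lookup is repeated before generating new code. A minimal sketch of that shape, using a toy cache and plain pthreads rather than QEMU's primitives (names here are illustrative only):

#include <pthread.h>
#include <stddef.h>

#define NSLOTS 256
static void *slots[NSLOTS];                                   /* toy cache */
static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;     /* e.g. mmap_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;     /* e.g. tb_lock   */

/* Called with "inner" held.  If the slot is empty we must also take "outer",
 * and the documented order is outer-before-inner, so drop inner, take both in
 * order, and re-check the slot: another thread may have filled it while we
 * held neither lock.  This is the shape of the reworked lookup path. */
static void *lookup_or_create(unsigned idx, void *(*create)(unsigned))
{
    void *v = slots[idx % NSLOTS];
    if (v) {
        return v;
    }
    pthread_mutex_unlock(&inner);
    pthread_mutex_lock(&outer);
    pthread_mutex_lock(&inner);
    v = slots[idx % NSLOTS];          /* re-check after reacquiring the locks */
    if (!v) {
        v = create(idx);              /* the expensive part (cf. tb_gen_code) */
        slots[idx % NSLOTS] = v;
    }
    pthread_mutex_unlock(&outer);
    return v;                         /* still holding "inner", as on entry */
}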

cpus.c (132 lines changed):
@@ -661,14 +661,6 @@ static void cpu_handle_guest_debug(CPUState *cpu)
     cpu->stopped = true;
 }
 
-static void cpu_signal(int sig)
-{
-    if (current_cpu) {
-        cpu_exit(current_cpu);
-    }
-    exit_request = 1;
-}
-
 #ifdef CONFIG_LINUX
 static void sigbus_reraise(void)
 {
@@ -781,29 +773,11 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu)
     }
 }
 
-static void qemu_tcg_init_cpu_signals(void)
-{
-    sigset_t set;
-    struct sigaction sigact;
-
-    memset(&sigact, 0, sizeof(sigact));
-    sigact.sa_handler = cpu_signal;
-    sigaction(SIG_IPI, &sigact, NULL);
-
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-}
-
 #else /* _WIN32 */
 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
 {
     abort();
 }
-
-static void qemu_tcg_init_cpu_signals(void)
-{
-}
 #endif /* _WIN32 */
 
 static QemuMutex qemu_global_mutex;
@@ -812,9 +786,6 @@ static unsigned iothread_requesting_mutex;
 
 static QemuThread io_thread;
 
-static QemuThread *tcg_cpu_thread;
-static QemuCond *tcg_halt_cond;
-
 /* cpu creation */
 static QemuCond qemu_cpu_cond;
 /* system init */
@@ -845,6 +816,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi.func = func;
     wi.data = data;
     wi.free = false;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -853,9 +826,10 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
         cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
-    while (!wi.done) {
+    while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
         qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
@@ -876,6 +850,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi->func = func;
     wi->data = data;
     wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = wi;
     } else {
@@ -884,6 +860,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
         cpu->queued_work_last = wi;
     wi->next = NULL;
     wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
 }
@@ -896,15 +873,23 @@ static void flush_queued_work(CPUState *cpu)
         return;
     }
 
-    while ((wi = cpu->queued_work_first)) {
+    qemu_mutex_lock(&cpu->work_mutex);
+    while (cpu->queued_work_first != NULL) {
+        wi = cpu->queued_work_first;
         cpu->queued_work_first = wi->next;
+        if (!cpu->queued_work_first) {
+            cpu->queued_work_last = NULL;
+        }
+        qemu_mutex_unlock(&cpu->work_mutex);
         wi->func(wi->data);
-        wi->done = true;
+        qemu_mutex_lock(&cpu->work_mutex);
         if (wi->free) {
             g_free(wi);
+        } else {
+            atomic_mb_set(&wi->done, true);
         }
     }
-    cpu->queued_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
     qemu_cond_broadcast(&qemu_work_cond);
 }
 
@@ -919,15 +904,13 @@ static void qemu_wait_io_event_common(CPUState *cpu)
     cpu->thread_kicked = false;
 }
 
-static void qemu_tcg_wait_io_event(void)
+static void qemu_tcg_wait_io_event(CPUState *cpu)
 {
-    CPUState *cpu;
-
     while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
         qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
-        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
     while (iothread_requesting_mutex) {
@@ -1041,7 +1024,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     rcu_register_thread();
 
     qemu_mutex_lock_iothread();
-    qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
 
     CPU_FOREACH(cpu) {
@@ -1053,7 +1035,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     /* wait for initial kick-off after machine start */
     while (first_cpu->stopped) {
-        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
 
         /* process any pending work */
         CPU_FOREACH(cpu) {
@@ -1062,7 +1044,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     }
 
     /* process any pending work */
-    exit_request = 1;
+    atomic_mb_set(&exit_request, 1);
 
     while (1) {
         tcg_exec_all();
@@ -1074,7 +1056,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
             }
         }
-        qemu_tcg_wait_io_event();
+        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
     }
 
     return NULL;
@@ -1085,61 +1067,47 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
 #ifndef _WIN32
     int err;
 
+    if (cpu->thread_kicked) {
+        return;
+    }
+    cpu->thread_kicked = true;
     err = pthread_kill(cpu->thread->thread, SIG_IPI);
     if (err) {
         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
         exit(1);
     }
 #else /* _WIN32 */
-    if (!qemu_cpu_is_self(cpu)) {
-        CONTEXT tcgContext;
-
-        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-
-        /* On multi-core systems, we are not sure that the thread is actually
-         * suspended until we can get the context.
-         */
-        tcgContext.ContextFlags = CONTEXT_CONTROL;
-        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
-            continue;
-        }
-
-        cpu_signal(0);
-
-        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-    }
+    abort();
 #endif
 }
 
+static void qemu_cpu_kick_no_halt(void)
+{
+    CPUState *cpu;
+    /* Ensure whatever caused the exit has reached the CPU threads before
+     * writing exit_request.
+     */
+    atomic_mb_set(&exit_request, 1);
+    cpu = atomic_mb_read(&tcg_current_cpu);
+    if (cpu) {
+        cpu_exit(cpu);
+    }
+}
+
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
-    if (!tcg_enabled() && !cpu->thread_kicked) {
+    if (tcg_enabled()) {
+        qemu_cpu_kick_no_halt();
+    } else {
         qemu_cpu_kick_thread(cpu);
-        cpu->thread_kicked = true;
     }
 }
 
 void qemu_cpu_kick_self(void)
 {
-#ifndef _WIN32
     assert(current_cpu);
-
-    if (!current_cpu->thread_kicked) {
-        qemu_cpu_kick_thread(current_cpu);
-        current_cpu->thread_kicked = true;
-    }
-#else
-    abort();
-#endif
+    qemu_cpu_kick_thread(current_cpu);
 }
 
 bool qemu_cpu_is_self(CPUState *cpu)
@@ -1166,12 +1134,12 @@ void qemu_mutex_lock_iothread(void)
      * TCG code execution.
      */
     if (!tcg_enabled() || qemu_in_vcpu_thread() ||
-        !first_cpu || !first_cpu->thread) {
+        !first_cpu || !first_cpu->created) {
         qemu_mutex_lock(&qemu_global_mutex);
         atomic_dec(&iothread_requesting_mutex);
     } else {
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_thread(first_cpu);
+            qemu_cpu_kick_no_halt();
             qemu_mutex_lock(&qemu_global_mutex);
         }
         atomic_dec(&iothread_requesting_mutex);
@@ -1251,6 +1219,8 @@ void resume_all_vcpus(void)
 static void qemu_tcg_init_vcpu(CPUState *cpu)
 {
     char thread_name[VCPU_THREAD_NAME_SIZE];
+    static QemuCond *tcg_halt_cond;
+    static QemuThread *tcg_cpu_thread;
 
     tcg_cpu_address_space_init(cpu, cpu->as);
 
@@ -1440,7 +1410,9 @@ static void tcg_exec_all(void)
             break;
         }
     }
-    exit_request = 0;
+
+    /* Pairs with smp_wmb in qemu_cpu_kick.  */
+    atomic_mb_set(&exit_request, 0);
 }
 
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
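The cpus.c hunks above put the queued-work list under a dedicated work_mutex, drop the lock around each callback, and publish completion with atomic_mb_set() so run_on_cpu() can wait without relying on signals. A self-contained sketch of that producer/consumer shape, assuming plain pthreads and illustrative names rather than QEMU's:

#include <pthread.h>
#include <stdbool.h>

/* Minimal sketch of the pattern: the list is guarded by a small dedicated
 * mutex, the lock is released while the callback runs, and "done" is only
 * flipped after the work has run, so a waiter cannot observe it early. */
struct work_item {
    void (*func)(void *data);
    void *data;
    bool done;
    struct work_item *next;
};

static struct work_item *queue_head;
static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_done = PTHREAD_COND_INITIALIZER;

static void flush_queue(void)
{
    pthread_mutex_lock(&work_mutex);
    while (queue_head) {
        struct work_item *wi = queue_head;
        queue_head = wi->next;
        pthread_mutex_unlock(&work_mutex);   /* never run the callback with the lock held */
        wi->func(wi->data);
        pthread_mutex_lock(&work_mutex);
        wi->done = true;                     /* publish completion under the lock */
    }
    pthread_mutex_unlock(&work_mutex);
    pthread_cond_broadcast(&work_done);      /* wake any thread waiting on wi->done */
}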

exec.c (2 lines changed):
@@ -90,7 +90,7 @@ static MemoryRegion io_mem_unassigned;
 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
 /* current CPU in the current thread. It is only valid inside
    cpu_exec() */
-DEFINE_TLS(CPUState *, current_cpu);
+__thread CPUState *current_cpu;
 /* 0 = Do not count executed instructions.
    1 = Precise instruction counting.
    2 = Adaptive rate instruction counting.  */

@@ -1359,7 +1359,7 @@ void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va)
        is still in the running state, which can cause packets to be dropped
        and state transition 'T' packets to be sent while the syscall is still
        being processed.  */
-    cpu_exit(s->c_cpu);
+    qemu_cpu_kick(s->c_cpu);
 #endif
 }
 

@@ -1417,7 +1417,7 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
          * recall us...
          */
         DMA_hold_DREQ(fdctrl->dma_chann);
-        DMA_schedule(fdctrl->dma_chann);
+        DMA_schedule();
     } else {
         /* Start transfer */
         fdctrl_transfer_handler(fdctrl, fdctrl->dma_chann, 0,

@@ -38,7 +38,6 @@ do { fprintf(stderr, "i82374 ERROR: " fmt , ## __VA_ARGS__); } while (0)
 
 typedef struct I82374State {
     uint8_t commands[8];
-    qemu_irq out;
     PortioList port_list;
 } I82374State;
 
@@ -101,7 +100,7 @@ static uint32_t i82374_read_descriptor(void *opaque, uint32_t nport)
 
 static void i82374_realize(I82374State *s, Error **errp)
 {
-    DMA_init(1, &s->out);
+    DMA_init(1);
     memset(s->commands, 0, sizeof(s->commands));
 }
 
@@ -145,8 +144,6 @@ static void i82374_isa_realize(DeviceState *dev, Error **errp)
                       isa->iobase);
 
     i82374_realize(s, errp);
-
-    qdev_init_gpio_out(dev, &s->out, 1);
 }
 
 static Property i82374_properties[] = {

@@ -59,7 +59,6 @@ static struct dma_cont {
     uint8_t flip_flop;
     int dshift;
     struct dma_regs regs[4];
-    qemu_irq *cpu_request_exit;
     MemoryRegion channel_io;
     MemoryRegion cont_io;
 } dma_controllers[2];
@@ -358,6 +357,7 @@ static void channel_run (int ncont, int ichan)
 }
 
 static QEMUBH *dma_bh;
+static bool dma_bh_scheduled;
 
 static void DMA_run (void)
 {
@@ -390,12 +390,15 @@ static void DMA_run (void)
 
     running = 0;
 out:
-    if (rearm)
+    if (rearm) {
         qemu_bh_schedule_idle(dma_bh);
+        dma_bh_scheduled = true;
+    }
 }
 
 static void DMA_run_bh(void *unused)
 {
+    dma_bh_scheduled = false;
     DMA_run();
 }
 
@@ -458,12 +461,14 @@ int DMA_write_memory (int nchan, void *buf, int pos, int len)
     return len;
 }
 
-/* request the emulator to transfer a new DMA memory block ASAP */
-void DMA_schedule(int nchan)
+/* request the emulator to transfer a new DMA memory block ASAP (even
+ * if the idle bottom half would not have exited the iothread yet).
+ */
+void DMA_schedule(void)
 {
-    struct dma_cont *d = &dma_controllers[nchan > 3];
-
-    qemu_irq_pulse(*d->cpu_request_exit);
+    if (dma_bh_scheduled) {
+        qemu_notify_event();
+    }
 }
 
 static void dma_reset(void *opaque)
@@ -515,13 +520,11 @@ static const MemoryRegionOps cont_io_ops = {
 
 /* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
 static void dma_init2(struct dma_cont *d, int base, int dshift,
-                      int page_base, int pageh_base,
-                      qemu_irq *cpu_request_exit)
+                      int page_base, int pageh_base)
 {
     int i;
 
     d->dshift = dshift;
-    d->cpu_request_exit = cpu_request_exit;
 
     memory_region_init_io(&d->channel_io, NULL, &channel_io_ops, d,
                           "dma-chan", 8 << d->dshift);
@@ -585,12 +588,10 @@ static const VMStateDescription vmstate_dma = {
     }
 };
 
-void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
+void DMA_init(int high_page_enable)
 {
-    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
-              high_page_enable ? 0x480 : -1, cpu_request_exit);
-    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
-              high_page_enable ? 0x488 : -1, cpu_request_exit);
+    dma_init2(&dma_controllers[0], 0x00, 0, 0x80, high_page_enable ? 0x480 : -1);
+    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88, high_page_enable ? 0x488 : -1);
     vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
     vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);
 

hw/i386/pc.c (13 lines changed):
@@ -1452,15 +1452,6 @@ DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus)
     return dev;
 }
 
-static void cpu_request_exit(void *opaque, int irq, int level)
-{
-    CPUState *cpu = current_cpu;
-
-    if (cpu && level) {
-        cpu_exit(cpu);
-    }
-}
-
 static const MemoryRegionOps ioport80_io_ops = {
     .write = ioport80_write,
     .read = ioport80_read,
@@ -1495,7 +1486,6 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
     qemu_irq rtc_irq = NULL;
     qemu_irq *a20_line;
     ISADevice *i8042, *port92, *vmmouse, *pit = NULL;
-    qemu_irq *cpu_exit_irq;
     MemoryRegion *ioport80_io = g_new(MemoryRegion, 1);
     MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1);
 
@@ -1572,8 +1562,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
     port92 = isa_create_simple(isa_bus, "port92");
     port92_init(port92, &a20_line[1]);
 
-    cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
-    DMA_init(0, cpu_exit_irq);
+    DMA_init(0);
 
     for(i = 0; i < MAX_FD; i++) {
         fd[i] = drive_get(IF_FLOPPY, 0, i);

@@ -100,7 +100,6 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
 
     /* 2 82C37 (dma) */
     isa = isa_create_simple(isabus, "i82374");
-    qdev_connect_gpio_out(DEVICE(isa), 0, s->out[1]);
 
     /* timer */
     isa_create_simple(isabus, "mc146818rtc");
@@ -111,7 +110,7 @@ static void i82378_init(Object *obj)
     DeviceState *dev = DEVICE(obj);
     I82378State *s = I82378(obj);
 
-    qdev_init_gpio_out(dev, s->out, 2);
+    qdev_init_gpio_out(dev, s->out, 1);
     qdev_init_gpio_in(dev, i82378_request_pic_irq, 16);
 }
 

@@ -251,15 +251,6 @@ static void network_init (PCIBus *pci_bus)
     }
 }
 
-static void cpu_request_exit(void *opaque, int irq, int level)
-{
-    CPUState *cpu = current_cpu;
-
-    if (cpu && level) {
-        cpu_exit(cpu);
-    }
-}
-
 static void mips_fulong2e_init(MachineState *machine)
 {
     ram_addr_t ram_size = machine->ram_size;
@@ -274,7 +265,6 @@ static void mips_fulong2e_init(MachineState *machine)
     long bios_size;
     int64_t kernel_entry;
     qemu_irq *i8259;
-    qemu_irq *cpu_exit_irq;
     PCIBus *pci_bus;
     ISABus *isa_bus;
     I2CBus *smbus;
@@ -375,8 +365,7 @@ static void mips_fulong2e_init(MachineState *machine)
 
     /* init other devices */
     pit = pit_init(isa_bus, 0x40, 0, NULL);
-    cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
-    DMA_init(0, cpu_exit_irq);
+    DMA_init(0);
 
     /* Super I/O */
     isa_create_simple(isa_bus, "i8042");

@@ -104,15 +104,6 @@ static const MemoryRegionOps dma_dummy_ops = {
 #define MAGNUM_BIOS_SIZE_MAX 0x7e000
 #define MAGNUM_BIOS_SIZE (BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX)
 
-static void cpu_request_exit(void *opaque, int irq, int level)
-{
-    CPUState *cpu = current_cpu;
-
-    if (cpu && level) {
-        cpu_exit(cpu);
-    }
-}
-
 static CPUUnassignedAccess real_do_unassigned_access;
 static void mips_jazz_do_unassigned_access(CPUState *cpu, hwaddr addr,
                                            bool is_write, bool is_exec,
@@ -150,7 +141,6 @@ static void mips_jazz_init(MachineState *machine,
     ISADevice *pit;
     DriveInfo *fds[MAX_FD];
     qemu_irq esp_reset, dma_enable;
-    qemu_irq *cpu_exit_irq;
     MemoryRegion *ram = g_new(MemoryRegion, 1);
     MemoryRegion *bios = g_new(MemoryRegion, 1);
     MemoryRegion *bios2 = g_new(MemoryRegion, 1);
@@ -234,8 +224,7 @@ static void mips_jazz_init(MachineState *machine,
     /* ISA devices */
     i8259 = i8259_init(isa_bus, env->irq[4]);
     isa_bus_irqs(isa_bus, i8259);
-    cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
-    DMA_init(0, cpu_exit_irq);
+    DMA_init(0);
     pit = pit_init(isa_bus, 0x40, 0, NULL);
     pcspk_init(isa_bus, pit);
 

@@ -905,15 +905,6 @@ static void main_cpu_reset(void *opaque)
     }
 }
 
-static void cpu_request_exit(void *opaque, int irq, int level)
-{
-    CPUState *cpu = current_cpu;
-
-    if (cpu && level) {
-        cpu_exit(cpu);
-    }
-}
-
 static
 void mips_malta_init(MachineState *machine)
 {
@@ -939,7 +930,6 @@ void mips_malta_init(MachineState *machine)
     MIPSCPU *cpu;
     CPUMIPSState *env;
     qemu_irq *isa_irq;
-    qemu_irq *cpu_exit_irq;
     int piix4_devfn;
     I2CBus *smbus;
     int i;
@@ -1175,8 +1165,7 @@ void mips_malta_init(MachineState *machine)
     smbus_eeprom_init(smbus, 8, smbus_eeprom_buf, smbus_eeprom_size);
     g_free(smbus_eeprom_buf);
     pit = pit_init(isa_bus, 0x40, 0, NULL);
-    cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
-    DMA_init(0, cpu_exit_irq);
+    DMA_init(0);
 
     /* Super I/O */
     isa_create_simple(isa_bus, "i8042");

@@ -41,8 +41,7 @@ static void handle_event(int event)
     }
 
     if (event & PVPANIC_PANICKED) {
-        qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, &error_abort);
-        vm_stop(RUN_STATE_GUEST_PANICKED);
+        qemu_system_guest_panicked();
         return;
     }
 }

@@ -336,15 +336,6 @@ static uint32_t PREP_io_800_readb (void *opaque, uint32_t addr)
 
 #define NVRAM_SIZE        0x2000
 
-static void cpu_request_exit(void *opaque, int irq, int level)
-{
-    CPUState *cpu = current_cpu;
-
-    if (cpu && level) {
-        cpu_exit(cpu);
-    }
-}
-
 static void ppc_prep_reset(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
@@ -626,8 +617,6 @@ static void ppc_prep_init(MachineState *machine)
     cpu = POWERPC_CPU(first_cpu);
     qdev_connect_gpio_out(&pci->qdev, 0,
                           cpu->env.irq_inputs[PPC6xx_INPUT_INT]);
-    qdev_connect_gpio_out(&pci->qdev, 1,
-                          qemu_allocate_irq(cpu_request_exit, NULL, 0));
     sysbus_connect_irq(&pcihost->busdev, 0, qdev_get_gpio_in(&pci->qdev, 9));
     sysbus_connect_irq(&pcihost->busdev, 1, qdev_get_gpio_in(&pci->qdev, 11));
     sysbus_connect_irq(&pcihost->busdev, 2, qdev_get_gpio_in(&pci->qdev, 9));

@@ -214,7 +214,7 @@ static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     CPUPPCState *env = &cpu->env;
 
     cs->halted = 1;
-    cpu_exit(cs);
+    qemu_cpu_kick(cs);
     /*
      * While stopping a CPU, the guest calls H_CPPR which
      * effectively disables interrupts on XICS level.

@@ -292,7 +292,7 @@ static char *vhost_scsi_get_fw_dev_path(FWPathProvider *p, BusState *bus,
 {
     VHostSCSI *s = VHOST_SCSI(dev);
     /* format: channel@channel/vhost-scsi@target,lun */
-    return g_strdup_printf("channel@%x/%s@%x,%x", s->channel,
+    return g_strdup_printf("/channel@%x/%s@%x,%x", s->channel,
                            qdev_fw_name(dev), s->target, s->lun);
 }
 

@@ -109,9 +109,9 @@ int DMA_write_memory (int nchan, void *buf, int pos, int size)
 }
 void DMA_hold_DREQ (int nchan) {}
 void DMA_release_DREQ (int nchan) {}
-void DMA_schedule(int nchan) {}
+void DMA_schedule(void) {}
 
-void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
+void DMA_init(int high_page_enable)
 {
 }
 

@@ -112,9 +112,9 @@ int DMA_write_memory (int nchan, void *buf, int pos, int size)
 }
 void DMA_hold_DREQ (int nchan) {}
 void DMA_release_DREQ (int nchan) {}
-void DMA_schedule(int nchan) {}
+void DMA_schedule(void) {}
 
-void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
+void DMA_init(int high_page_enable)
 {
 }
 

@@ -266,44 +266,6 @@ CPUArchState *cpu_copy(CPUArchState *env);
 
 #if !defined(CONFIG_USER_ONLY)
 
-/* memory API */
-
-typedef struct RAMBlock RAMBlock;
-
-struct RAMBlock {
-    struct rcu_head rcu;
-    struct MemoryRegion *mr;
-    uint8_t *host;
-    ram_addr_t offset;
-    ram_addr_t used_length;
-    ram_addr_t max_length;
-    void (*resized)(const char*, uint64_t length, void *host);
-    uint32_t flags;
-    /* Protected by iothread lock.  */
-    char idstr[256];
-    /* RCU-enabled, writes protected by the ramlist lock */
-    QLIST_ENTRY(RAMBlock) next;
-    int fd;
-};
-
-static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
-{
-    assert(offset < block->used_length);
-    assert(block->host);
-    return (char *)block->host + offset;
-}
-
-typedef struct RAMList {
-    QemuMutex mutex;
-    /* Protected by the iothread lock.  */
-    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
-    RAMBlock *mru_block;
-    /* RCU-enabled, writes protected by the ramlist lock. */
-    QLIST_HEAD(, RAMBlock) blocks;
-    uint32_t version;
-} RAMList;
-extern RAMList ram_list;
-
 /* Flags stored in the low bits of the TLB virtual address.  These are
    defined so that fast path ram access is all zeros. */
 /* Zero if TLB entry is valid.  */
@@ -316,9 +278,6 @@ extern RAMList ram_list;
 
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
-ram_addr_t last_ram_offset(void);
-void qemu_mutex_lock_ramlist(void);
-void qemu_mutex_unlock_ramlist(void);
 #endif /* !CONFIG_USER_ONLY */
 
 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
@@ -226,7 +226,7 @@ struct TranslationBlock {
     struct TranslationBlock *jmp_first;
 };
 
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
 
 typedef struct TBContext TBContext;
 
@@ -236,7 +236,7 @@ struct TBContext {
     TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
     int nb_tbs;
     /* any access to the tbs or the page table must use this lock */
-    spinlock_t tb_lock;
+    QemuMutex tb_lock;
 
     /* statistics */
     int tb_flush_count;
@@ -375,11 +375,17 @@ void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
 #endif
 
 #if defined(CONFIG_USER_ONLY)
+void mmap_lock(void);
+void mmap_unlock(void);
 
 static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {
     return addr;
 }
 #else
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
 
 /* cputlb.c */
 tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
 #endif
@@ -387,8 +393,9 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
 /* vl.c */
 extern int singlestep;
 
-/* cpu-exec.c */
-extern volatile sig_atomic_t exit_request;
+/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
+extern CPUState *tcg_current_cpu;
+extern bool exit_request;
 
 #if !defined(CONFIG_USER_ONLY)
 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
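
(Note on the hunk above: mmap_lock()/mmap_unlock() guard the guest page tables in
user-mode emulation, while the system-mode stubs are empty because that lookup is
single-threaded there.  A minimal sketch of the intended calling pattern follows;
the function and its body are invented for illustration and are not part of the patch.)

    /* Hold mmap_lock around any walk of the user-mode page descriptors. */
    static void walk_guest_pages_example(target_ulong addr)
    {
        mmap_lock();
        /* ... look up or allocate the PageDesc covering addr ... */
        mmap_unlock();
    }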
@@ -22,6 +22,46 @@
 #ifndef CONFIG_USER_ONLY
 #include "hw/xen/xen.h"
 
+typedef struct RAMBlock RAMBlock;
+
+struct RAMBlock {
+    struct rcu_head rcu;
+    struct MemoryRegion *mr;
+    uint8_t *host;
+    ram_addr_t offset;
+    ram_addr_t used_length;
+    ram_addr_t max_length;
+    void (*resized)(const char*, uint64_t length, void *host);
+    uint32_t flags;
+    /* Protected by iothread lock.  */
+    char idstr[256];
+    /* RCU-enabled, writes protected by the ramlist lock */
+    QLIST_ENTRY(RAMBlock) next;
+    int fd;
+};
+
+static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
+{
+    assert(offset < block->used_length);
+    assert(block->host);
+    return (char *)block->host + offset;
+}
+
+typedef struct RAMList {
+    QemuMutex mutex;
+    /* Protected by the iothread lock.  */
+    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
+    RAMBlock *mru_block;
+    /* RCU-enabled, writes protected by the ramlist lock. */
+    QLIST_HEAD(, RAMBlock) blocks;
+    uint32_t version;
+} RAMList;
+extern RAMList ram_list;
+
+ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
 
 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                     bool share, const char *mem_path,
                                     Error **errp);
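
(The comments above distinguish RCU readers of the block list from writers that take
the ramlist mutex.  A sketch of a read-side walk, assuming QEMU's usual RCU helpers
rcu_read_lock()/rcu_read_unlock() and QLIST_FOREACH_RCU(); illustrative only, not code
from this series.)

    /* Sum up used RAM across all blocks without taking the ramlist mutex. */
    static ram_addr_t total_used_ram_example(void)
    {
        RAMBlock *block;
        ram_addr_t total = 0;

        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            total += block->used_length;
        }
        rcu_read_unlock();
        return total;
    }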
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
- */
-
-/* configure guarantees us that we have pthreads on any host except
- * mingw32, which doesn't support any of the user-only targets.
- * So we can simply assume we have pthread mutexes here.
- */
-#if defined(CONFIG_USER_ONLY)
-
-#include <pthread.h>
-#define spin_lock pthread_mutex_lock
-#define spin_unlock pthread_mutex_unlock
-#define spinlock_t pthread_mutex_t
-#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
-
-#else
-
-/* Empty implementations, on the theory that system mode emulation
- * is single-threaded.  This means that these functions should only
- * be used from code run in the TCG cpu thread, and cannot protect
- * data structures which might also be accessed from the IO thread
- * or from signal handlers.
- */
-typedef int spinlock_t;
-#define SPIN_LOCK_UNLOCKED 0
-
-static inline void spin_lock(spinlock_t *lock)
-{
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-}
-
-#endif
@@ -112,8 +112,8 @@ int DMA_read_memory (int nchan, void *buf, int pos, int size);
 int DMA_write_memory (int nchan, void *buf, int pos, int size);
 void DMA_hold_DREQ (int nchan);
 void DMA_release_DREQ (int nchan);
-void DMA_schedule(int nchan);
+void DMA_schedule(void);
-void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit);
+void DMA_init(int high_page_enable);
 void DMA_register_channel (int nchan,
                            DMA_transfer_handler transfer_handler,
                            void *opaque);
@@ -203,6 +203,14 @@ time_t mktimegm(struct tm *tm);
 int qemu_fdatasync(int fd);
 int fcntl_setfl(int fd, int flag);
 int qemu_parse_fd(const char *param);
+int qemu_strtol(const char *nptr, const char **endptr, int base,
+                long *result);
+int qemu_strtoul(const char *nptr, const char **endptr, int base,
+                 unsigned long *result);
+int qemu_strtoll(const char *nptr, const char **endptr, int base,
+                 int64_t *result);
+int qemu_strtoull(const char *nptr, const char **endptr, int base,
+                  uint64_t *result);
 
 int parse_uint(const char *s, unsigned long long *value, char **endptr,
                int base);
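
(The new wrappers fold strtol()'s errno/endptr bookkeeping into a single return value:
0 on success, a negative errno otherwise.  A small usage sketch with invented names;
the exact error codes are as documented by the series, not restated authoritatively here.)

    int parse_example(const char *str)
    {
        int64_t val;
        const char *end;
        int err = qemu_strtoll(str, &end, 0, &val);

        if (err < 0) {
            return err;            /* negative errno on overflow or no digits */
        }
        /* *end is the first unparsed character, e.g. a unit suffix */
        return 0;
    }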
@@ -203,6 +203,7 @@ void qemu_set_fd_handler(int fd,
                          IOHandler *fd_write,
                          void *opaque);
 
+GSource *iohandler_get_g_source(void);
 #ifdef CONFIG_POSIX
 /**
  * qemu_add_child_watch: Register a child process for reaping.
@@ -265,8 +266,6 @@ void qemu_mutex_unlock_iothread(void);
 /* internal interfaces */
 
 void qemu_fd_register(int fd);
-void qemu_iohandler_fill(GArray *pollfds);
-void qemu_iohandler_poll(GArray *pollfds, int rc);
 
 QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
 void qemu_bh_schedule_idle(QEMUBH *bh);
@@ -71,7 +71,7 @@ struct rcu_reader_data {
     /* Data used by reader only */
     unsigned depth;
 
-    /* Data used for registry, protected by rcu_gp_lock */
+    /* Data used for registry, protected by rcu_registry_lock */
     QLIST_ENTRY(rcu_reader_data) node;
 };
 
@@ -55,18 +55,18 @@ static inline void seqlock_write_unlock(QemuSeqLock *sl)
 static inline unsigned seqlock_read_begin(QemuSeqLock *sl)
 {
     /* Always fail if a write is in progress.  */
-    unsigned ret = sl->sequence & ~1;
+    unsigned ret = atomic_read(&sl->sequence);
 
     /* Read sequence before reading other fields. */
     smp_rmb();
-    return ret;
+    return ret & ~1;
 }
 
-static int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
+static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
 {
     /* Read other fields before reading final sequence.  */
     smp_rmb();
-    return unlikely(sl->sequence != start);
+    return unlikely(atomic_read(&sl->sequence) != start);
 }
 
 #endif
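
(Why the mask moved after the atomic_read(): the value returned by seqlock_read_begin()
must come from a single atomic load so the retry check compares against the very same
sequence word.  The canonical reader loop looks roughly like this; "sl" and
"protected_value" are placeholders, not symbols from the patch.)

    static int64_t read_value_example(QemuSeqLock *sl, const int64_t *protected_value)
    {
        unsigned start;
        int64_t val;

        do {
            start = seqlock_read_begin(sl);   /* even, unless a write is in progress */
            val = *protected_value;
        } while (seqlock_read_retry(sl, start));
        return val;
    }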
@@ -1,52 +0,0 @@
-/*
- * Abstraction layer for defining and using TLS variables
- *
- * Copyright (c) 2011 Red Hat, Inc
- * Copyright (c) 2011 Linaro Limited
- *
- * Authors:
- *  Paolo Bonzini <pbonzini@redhat.com>
- *  Peter Maydell <peter.maydell@linaro.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef QEMU_TLS_H
-#define QEMU_TLS_H
-
-/* Per-thread variables. Note that we only have implementations
- * which are really thread-local on Linux; the dummy implementations
- * define plain global variables.
- *
- * This means that for the moment use should be restricted to
- * per-VCPU variables, which are OK because:
- *  - the only -user mode supporting multiple VCPU threads is linux-user
- *  - TCG system mode is single-threaded regarding VCPUs
- *  - KVM system mode is multi-threaded but limited to Linux
- *
- * TODO: proper implementations via Win32 .tls sections and
- * POSIX pthread_getspecific.
- */
-#ifdef __linux__
-#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
-#define DEFINE_TLS(type, x)  __thread __typeof__(type) tls__##x
-#define tls_var(x)           tls__##x
-#else
-/* Dummy implementations which define plain global variables */
-#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
-#define DEFINE_TLS(type, x)  __typeof__(type) tls__##x
-#define tls_var(x)           tls__##x
-#endif
-
-#endif
@@ -28,7 +28,6 @@
 #include "exec/memattrs.h"
 #include "qemu/queue.h"
 #include "qemu/thread.h"
-#include "qemu/tls.h"
 #include "qemu/typedefs.h"
 
 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
@@ -244,6 +243,8 @@ struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
+ * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @queued_work_first: First asynchronous work pending.
  *
  * State of one CPU core or thread.
  */
@@ -264,17 +265,19 @@ struct CPUState {
     uint32_t host_tid;
     bool running;
     struct QemuCond *halt_cond;
-    struct qemu_work_item *queued_work_first, *queued_work_last;
     bool thread_kicked;
     bool created;
     bool stop;
     bool stopped;
-    volatile sig_atomic_t exit_request;
+    bool exit_request;
     uint32_t interrupt_request;
     int singlestep_enabled;
     int64_t icount_extra;
     sigjmp_buf jmp_env;
 
+    QemuMutex work_mutex;
+    struct qemu_work_item *queued_work_first, *queued_work_last;
+
     AddressSpace *as;
     struct AddressSpaceDispatch *memory_dispatch;
     MemoryListener *tcg_as_listener;
@@ -320,7 +323,7 @@ struct CPUState {
        offset from AREG0.  Leave this field at the end so as to make the
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
-    volatile sig_atomic_t tcg_exit_req;
+    uint32_t tcg_exit_req;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);
@@ -333,8 +336,7 @@ extern struct CPUTailQ cpus;
     QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
 #define first_cpu QTAILQ_FIRST(&cpus)
 
-DECLARE_TLS(CPUState *, current_cpu);
-#define current_cpu tls_var(current_cpu)
+extern __thread CPUState *current_cpu;
 
 /**
  * cpu_paging_enabled:
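
(A rough sketch of how a producer would use the new work_mutex/queued_work_* fields;
the helper below is illustrative only -- the real queuing code lives elsewhere in the
series and may differ, and it assumes qemu_work_item is linked through a "next" field.)

    static void queue_work_example(CPUState *cpu, struct qemu_work_item *wi)
    {
        qemu_mutex_lock(&cpu->work_mutex);
        wi->next = NULL;                      /* assumption: singly linked list */
        if (cpu->queued_work_first == NULL) {
            cpu->queued_work_first = wi;
        } else {
            cpu->queued_work_last->next = wi;
        }
        cpu->queued_work_last = wi;
        qemu_mutex_unlock(&cpu->work_mutex);
        qemu_cpu_kick(cpu);                   /* wake the vCPU so it notices the work */
    }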
@@ -69,6 +69,7 @@ int qemu_reset_requested_get(void);
 void qemu_system_killed(int signal, pid_t pid);
 void qemu_devices_reset(void);
 void qemu_system_reset(bool report);
+void qemu_system_guest_panicked(void);
 
 void qemu_add_exit_notifier(Notifier *notify);
 void qemu_remove_exit_notifier(Notifier *notify);
 iohandler.c | 115
@@ -32,111 +32,30 @@
 #include <sys/wait.h>
 #endif
 
-typedef struct IOHandlerRecord {
-    IOHandler *fd_read;
-    IOHandler *fd_write;
-    void *opaque;
-    QLIST_ENTRY(IOHandlerRecord) next;
-    int fd;
-    int pollfds_idx;
-    bool deleted;
-} IOHandlerRecord;
+/* This context runs on top of main loop.  We can't reuse qemu_aio_context
+ * because iohandlers mustn't be polled by aio_poll(qemu_aio_context). */
+static AioContext *iohandler_ctx;
 
-static QLIST_HEAD(, IOHandlerRecord) io_handlers =
-    QLIST_HEAD_INITIALIZER(io_handlers);
+static void iohandler_init(void)
+{
+    if (!iohandler_ctx) {
+        iohandler_ctx = aio_context_new(&error_abort);
+    }
+}
+
+GSource *iohandler_get_g_source(void)
+{
+    iohandler_init();
+    return aio_get_g_source(iohandler_ctx);
+}
 
 void qemu_set_fd_handler(int fd,
                          IOHandler *fd_read,
                          IOHandler *fd_write,
                          void *opaque)
 {
-    IOHandlerRecord *ioh;
-
-    assert(fd >= 0);
-
-    if (!fd_read && !fd_write) {
-        QLIST_FOREACH(ioh, &io_handlers, next) {
-            if (ioh->fd == fd) {
-                ioh->deleted = 1;
-                break;
-            }
-        }
-    } else {
-        QLIST_FOREACH(ioh, &io_handlers, next) {
-            if (ioh->fd == fd)
-                goto found;
-        }
-        ioh = g_malloc0(sizeof(IOHandlerRecord));
-        QLIST_INSERT_HEAD(&io_handlers, ioh, next);
-    found:
-        ioh->fd = fd;
-        ioh->fd_read = fd_read;
-        ioh->fd_write = fd_write;
-        ioh->opaque = opaque;
-        ioh->pollfds_idx = -1;
-        ioh->deleted = 0;
-        qemu_notify_event();
-    }
-}
-
-void qemu_iohandler_fill(GArray *pollfds)
-{
-    IOHandlerRecord *ioh;
-
-    QLIST_FOREACH(ioh, &io_handlers, next) {
-        int events = 0;
-
-        if (ioh->deleted)
-            continue;
-        if (ioh->fd_read) {
-            events |= G_IO_IN | G_IO_HUP | G_IO_ERR;
-        }
-        if (ioh->fd_write) {
-            events |= G_IO_OUT | G_IO_ERR;
-        }
-        if (events) {
-            GPollFD pfd = {
-                .fd = ioh->fd,
-                .events = events,
-            };
-            ioh->pollfds_idx = pollfds->len;
-            g_array_append_val(pollfds, pfd);
-        } else {
-            ioh->pollfds_idx = -1;
-        }
-    }
-}
-
-void qemu_iohandler_poll(GArray *pollfds, int ret)
-{
-    if (ret > 0) {
-        IOHandlerRecord *pioh, *ioh;
-
-        QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
-            int revents = 0;
-
-            if (!ioh->deleted && ioh->pollfds_idx != -1) {
-                GPollFD *pfd = &g_array_index(pollfds, GPollFD,
-                                              ioh->pollfds_idx);
-                revents = pfd->revents;
-            }
-
-            if (!ioh->deleted && ioh->fd_read &&
-                (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR))) {
-                ioh->fd_read(ioh->opaque);
-            }
-            if (!ioh->deleted && ioh->fd_write &&
-                (revents & (G_IO_OUT | G_IO_ERR))) {
-                ioh->fd_write(ioh->opaque);
-            }
-
-            /* Do this last in case read/write handlers marked it for deletion */
-            if (ioh->deleted) {
-                QLIST_REMOVE(ioh, next);
-                g_free(ioh);
-            }
-        }
-    }
+    iohandler_init();
+    aio_set_fd_handler(iohandler_ctx, fd, fd_read, fd_write, opaque);
 }
 
 /* reaping of zombies.  right now we're not passing the status to
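
(With iohandler.c now a shim over an AioContext, registration keeps its old shape.
A usage sketch with invented callback names; removing a handler by passing NULL for
both callbacks matches the old behaviour and the aio_set_fd_handler() semantics.)

    static void my_read_ready(void *opaque)
    {
        /* consume data from the file descriptor */
    }

    static void register_example(int fd)
    {
        qemu_set_fd_handler(fd, my_read_ready, NULL, NULL);   /* read handler only */
    }

    static void unregister_example(int fd)
    {
        qemu_set_fd_handler(fd, NULL, NULL, NULL);            /* removes the fd */
    }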
@@ -105,7 +105,7 @@ static int pending_cpus;
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
-    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     pthread_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
@@ -127,11 +127,11 @@ void fork_end(int child)
         pthread_mutex_init(&cpu_list_mutex, NULL);
         pthread_cond_init(&exclusive_cond, NULL);
         pthread_cond_init(&exclusive_resume, NULL);
-        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
+        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
         pthread_mutex_unlock(&exclusive_lock);
-        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
 
@@ -261,8 +261,6 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
 int target_msync(abi_ulong start, abi_ulong len, int flags);
 extern unsigned long last_brk;
 extern abi_ulong mmap_next_start;
-void mmap_lock(void);
-void mmap_unlock(void);
 abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
 void cpu_list_lock(void);
 void cpu_list_unlock(void);
@@ -4512,6 +4512,7 @@ static void *clone_func(void *arg)
     CPUState *cpu;
     TaskState *ts;
 
+    rcu_register_thread();
     env = info->env;
     cpu = ENV_GET_CPU(env);
     thread_cpu = cpu;
@@ -5613,6 +5614,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
             thread_cpu = NULL;
             object_unref(OBJECT(cpu));
             g_free(ts);
+            rcu_unregister_thread();
             pthread_exit(NULL);
         }
 #ifdef TARGET_GPROF
@@ -161,6 +161,9 @@ int qemu_init_main_loop(Error **errp)
     src = aio_get_g_source(qemu_aio_context);
     g_source_attach(src, NULL);
     g_source_unref(src);
+    src = iohandler_get_g_source();
+    g_source_attach(src, NULL);
+    g_source_unref(src);
     return 0;
 }
 
@@ -487,7 +490,6 @@ int main_loop_wait(int nonblocking)
 #ifdef CONFIG_SLIRP
     slirp_pollfds_fill(gpollfds, &timeout);
 #endif
-    qemu_iohandler_fill(gpollfds);
 
     if (timeout == UINT32_MAX) {
         timeout_ns = -1;
@@ -500,7 +502,6 @@ int main_loop_wait(int nonblocking)
                                           &main_loop_tlg));
 
     ret = os_host_main_loop_wait(timeout_ns);
-    qemu_iohandler_poll(gpollfds, ret);
 #ifdef CONFIG_SLIRP
     slirp_pollfds_poll(gpollfds, (ret < 0));
 #endif
 qmp.c | 14
@@ -49,14 +49,20 @@ VersionInfo *qmp_query_version(Error **errp)
 {
     VersionInfo *info = g_new0(VersionInfo, 1);
     const char *version = QEMU_VERSION;
-    char *tmp;
+    const char *tmp;
+    int err;
 
     info->qemu = g_new0(VersionTriple, 1);
-    info->qemu->major = strtol(version, &tmp, 10);
+    err = qemu_strtoll(version, &tmp, 10, &info->qemu->major);
+    assert(err == 0);
     tmp++;
-    info->qemu->minor = strtol(tmp, &tmp, 10);
+    err = qemu_strtoll(tmp, &tmp, 10, &info->qemu->minor);
+    assert(err == 0);
     tmp++;
-    info->qemu->micro = strtol(tmp, &tmp, 10);
+    err = qemu_strtoll(tmp, &tmp, 10, &info->qemu->micro);
+    assert(err == 0);
     info->package = g_strdup(QEMU_PKGVERSION);
 
     return info;
@@ -114,6 +114,8 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
 void cpu_exit(CPUState *cpu)
 {
     cpu->exit_request = 1;
+    /* Ensure cpu_exec will see the exit request after TCG has exited.  */
+    smp_wmb();
     cpu->tcg_exit_req = 1;
 }
 
@@ -314,6 +316,7 @@ static void cpu_common_initfn(Object *obj)
 
     cpu->cpu_index = -1;
     cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
+    qemu_mutex_init(&cpu->work_mutex);
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
 }
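
(The smp_wmb() added to cpu_exit() orders the store to exit_request before the store
to tcg_exit_req.  Any consumer that sees tcg_exit_req set must issue a read barrier
before reading exit_request; the pairing below is schematic, not the literal
cpu-exec.c code.)

    static void handle_exit_example(CPUState *cpu)
    {
        if (cpu->tcg_exit_req) {
            smp_rmb();                 /* pairs with smp_wmb() in cpu_exit() */
            if (cpu->exit_request) {
                /* leave the TCG execution loop */
            }
            cpu->tcg_exit_req = 0;
        }
    }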
@ -141,44 +141,22 @@ our $Ident = qr{
|
||||||
}x;
|
}x;
|
||||||
our $Storage = qr{extern|static|asmlinkage};
|
our $Storage = qr{extern|static|asmlinkage};
|
||||||
our $Sparse = qr{
|
our $Sparse = qr{
|
||||||
__user|
|
__force
|
||||||
__kernel|
|
|
||||||
__force|
|
|
||||||
__iomem|
|
|
||||||
__must_check|
|
|
||||||
__init_refok|
|
|
||||||
__kprobes|
|
|
||||||
__ref
|
|
||||||
}x;
|
}x;
|
||||||
|
|
||||||
# Notes to $Attribute:
|
# Notes to $Attribute:
|
||||||
# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
|
|
||||||
our $Attribute = qr{
|
our $Attribute = qr{
|
||||||
const|
|
const|
|
||||||
__percpu|
|
volatile|
|
||||||
__nocast|
|
QEMU_NORETURN|
|
||||||
__safe|
|
QEMU_WARN_UNUSED_RESULT|
|
||||||
__bitwise__|
|
QEMU_SENTINEL|
|
||||||
__packed__|
|
QEMU_ARTIFICIAL|
|
||||||
__packed2__|
|
QEMU_PACKED|
|
||||||
__naked|
|
GCC_FMT_ATTR
|
||||||
__maybe_unused|
|
|
||||||
__always_unused|
|
|
||||||
__noreturn|
|
|
||||||
__used|
|
|
||||||
__cold|
|
|
||||||
__noclone|
|
|
||||||
__deprecated|
|
|
||||||
__read_mostly|
|
|
||||||
__kprobes|
|
|
||||||
__(?:mem|cpu|dev|)(?:initdata|initconst|init\b)|
|
|
||||||
____cacheline_aligned|
|
|
||||||
____cacheline_aligned_in_smp|
|
|
||||||
____cacheline_internodealigned_in_smp|
|
|
||||||
__weak
|
|
||||||
}x;
|
}x;
|
||||||
our $Modifier;
|
our $Modifier;
|
||||||
our $Inline = qr{inline|__always_inline|noinline};
|
our $Inline = qr{inline};
|
||||||
our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
|
our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
|
||||||
our $Lval = qr{$Ident(?:$Member)*};
|
our $Lval = qr{$Ident(?:$Member)*};
|
||||||
|
|
||||||
|
@ -215,14 +193,6 @@ our $typeTypedefs = qr{(?x:
|
||||||
| QEMUBH # all uppercase
|
| QEMUBH # all uppercase
|
||||||
)};
|
)};
|
||||||
|
|
||||||
our $logFunctions = qr{(?x:
|
|
||||||
printk|
|
|
||||||
pr_(debug|dbg|vdbg|devel|info|warning|err|notice|alert|crit|emerg|cont)|
|
|
||||||
(dev|netdev|netif)_(printk|dbg|vdbg|info|warn|err|notice|alert|crit|emerg|WARN)|
|
|
||||||
WARN|
|
|
||||||
panic
|
|
||||||
)};
|
|
||||||
|
|
||||||
our @typeList = (
|
our @typeList = (
|
||||||
qr{void},
|
qr{void},
|
||||||
qr{(?:unsigned\s+)?char},
|
qr{(?:unsigned\s+)?char},
|
||||||
|
@ -243,20 +213,20 @@ our @typeList = (
|
||||||
qr{${Ident}_handler},
|
qr{${Ident}_handler},
|
||||||
qr{${Ident}_handler_fn},
|
qr{${Ident}_handler_fn},
|
||||||
);
|
);
|
||||||
|
|
||||||
|
# This can be modified by sub possible. Since it can be empty, be careful
|
||||||
|
# about regexes that always match, because they can cause infinite loops.
|
||||||
our @modifierList = (
|
our @modifierList = (
|
||||||
qr{fastcall},
|
|
||||||
);
|
);
|
||||||
|
|
||||||
our $allowed_asm_includes = qr{(?x:
|
|
||||||
irq|
|
|
||||||
memory
|
|
||||||
)};
|
|
||||||
# memory.h: ARM has a custom one
|
|
||||||
|
|
||||||
sub build_types {
|
sub build_types {
|
||||||
my $mods = "(?x: \n" . join("|\n ", @modifierList) . "\n)";
|
|
||||||
my $all = "(?x: \n" . join("|\n ", @typeList) . "\n)";
|
my $all = "(?x: \n" . join("|\n ", @typeList) . "\n)";
|
||||||
$Modifier = qr{(?:$Attribute|$Sparse|$mods)};
|
if (@modifierList > 0) {
|
||||||
|
my $mods = "(?x: \n" . join("|\n ", @modifierList) . "\n)";
|
||||||
|
$Modifier = qr{(?:$Attribute|$Sparse|$mods)};
|
||||||
|
} else {
|
||||||
|
$Modifier = qr{(?:$Attribute|$Sparse)};
|
||||||
|
}
|
||||||
$NonptrType = qr{
|
$NonptrType = qr{
|
||||||
(?:$Modifier\s+|const\s+)*
|
(?:$Modifier\s+|const\s+)*
|
||||||
(?:
|
(?:
|
||||||
|
@ -277,27 +247,6 @@ build_types();
|
||||||
|
|
||||||
$chk_signoff = 0 if ($file);
|
$chk_signoff = 0 if ($file);
|
||||||
|
|
||||||
my @dep_includes = ();
|
|
||||||
my @dep_functions = ();
|
|
||||||
my $removal = "Documentation/feature-removal-schedule.txt";
|
|
||||||
if ($tree && -f "$root/$removal") {
|
|
||||||
open(my $REMOVE, '<', "$root/$removal") ||
|
|
||||||
die "$P: $removal: open failed - $!\n";
|
|
||||||
while (<$REMOVE>) {
|
|
||||||
if (/^Check:\s+(.*\S)/) {
|
|
||||||
for my $entry (split(/[, ]+/, $1)) {
|
|
||||||
if ($entry =~ m@include/(.*)@) {
|
|
||||||
push(@dep_includes, $1);
|
|
||||||
|
|
||||||
} elsif ($entry !~ m@/@) {
|
|
||||||
push(@dep_functions, $entry);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
close($REMOVE);
|
|
||||||
}
|
|
||||||
|
|
||||||
my @rawlines = ();
|
my @rawlines = ();
|
||||||
my @lines = ();
|
my @lines = ();
|
||||||
my $vname;
|
my $vname;
|
||||||
|
@ -1127,33 +1076,6 @@ sub CHK {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sub check_absolute_file {
|
|
||||||
my ($absolute, $herecurr) = @_;
|
|
||||||
my $file = $absolute;
|
|
||||||
|
|
||||||
##print "absolute<$absolute>\n";
|
|
||||||
|
|
||||||
# See if any suffix of this path is a path within the tree.
|
|
||||||
while ($file =~ s@^[^/]*/@@) {
|
|
||||||
if (-f "$root/$file") {
|
|
||||||
##print "file<$file>\n";
|
|
||||||
last;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (! -f _) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
# It is, so see if the prefix is acceptable.
|
|
||||||
my $prefix = $absolute;
|
|
||||||
substr($prefix, -length($file)) = '';
|
|
||||||
|
|
||||||
##print "prefix<$prefix>\n";
|
|
||||||
if ($prefix ne ".../") {
|
|
||||||
WARN("use relative pathname instead of absolute in changelog text\n" . $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sub process {
|
sub process {
|
||||||
my $filename = shift;
|
my $filename = shift;
|
||||||
|
|
||||||
|
@ -1196,10 +1118,6 @@ sub process {
|
||||||
my %suppress_export;
|
my %suppress_export;
|
||||||
|
|
||||||
# Pre-scan the patch sanitizing the lines.
|
# Pre-scan the patch sanitizing the lines.
|
||||||
# Pre-scan the patch looking for any __setup documentation.
|
|
||||||
#
|
|
||||||
my @setup_docs = ();
|
|
||||||
my $setup_docs = 0;
|
|
||||||
|
|
||||||
sanitise_line_reset();
|
sanitise_line_reset();
|
||||||
my $line;
|
my $line;
|
||||||
|
@ -1207,13 +1125,6 @@ sub process {
|
||||||
$linenr++;
|
$linenr++;
|
||||||
$line = $rawline;
|
$line = $rawline;
|
||||||
|
|
||||||
if ($rawline=~/^\+\+\+\s+(\S+)/) {
|
|
||||||
$setup_docs = 0;
|
|
||||||
if ($1 =~ m@Documentation/kernel-parameters.txt$@) {
|
|
||||||
$setup_docs = 1;
|
|
||||||
}
|
|
||||||
#next;
|
|
||||||
}
|
|
||||||
if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
|
if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
|
||||||
$realline=$1-1;
|
$realline=$1-1;
|
||||||
if (defined $2) {
|
if (defined $2) {
|
||||||
|
@ -1272,10 +1183,6 @@ sub process {
|
||||||
|
|
||||||
#print "==>$rawline\n";
|
#print "==>$rawline\n";
|
||||||
#print "-->$line\n";
|
#print "-->$line\n";
|
||||||
|
|
||||||
if ($setup_docs && $line =~ /^\+/) {
|
|
||||||
push(@setup_docs, $line);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
$prefix = '';
|
$prefix = '';
|
||||||
|
@ -1350,9 +1257,6 @@ sub process {
|
||||||
WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
|
WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
if ($realfile =~ m@^include/asm/@) {
|
|
||||||
ERROR("do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\n" . "$here$rawline\n");
|
|
||||||
}
|
|
||||||
next;
|
next;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1367,7 +1271,7 @@ sub process {
|
||||||
# Check for incorrect file permissions
|
# Check for incorrect file permissions
|
||||||
if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
|
if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
|
||||||
my $permhere = $here . "FILE: $realfile\n";
|
my $permhere = $here . "FILE: $realfile\n";
|
||||||
if ($realfile =~ /(Makefile|Kconfig|\.c|\.cpp|\.h|\.S|\.tmpl)$/) {
|
if ($realfile =~ /(\bMakefile(?:\.objs)?|\.c|\.cc|\.cpp|\.h|\.mak|\.[sS])$/) {
|
||||||
ERROR("do not set execute permissions for source files\n" . $permhere);
|
ERROR("do not set execute permissions for source files\n" . $permhere);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1392,20 +1296,6 @@ sub process {
|
||||||
$herecurr) if (!$emitted_corrupt++);
|
$herecurr) if (!$emitted_corrupt++);
|
||||||
}
|
}
|
||||||
|
|
||||||
# Check for absolute kernel paths.
|
|
||||||
if ($tree) {
|
|
||||||
while ($line =~ m{(?:^|\s)(/\S*)}g) {
|
|
||||||
my $file = $1;
|
|
||||||
|
|
||||||
if ($file =~ m{^(.*?)(?::\d+)+:?$} &&
|
|
||||||
check_absolute_file($1, $herecurr)) {
|
|
||||||
#
|
|
||||||
} else {
|
|
||||||
check_absolute_file($file, $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
|
# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
|
||||||
if (($realfile =~ /^$/ || $line =~ /^\+/) &&
|
if (($realfile =~ /^$/ || $line =~ /^\+/) &&
|
||||||
$rawline !~ m/^$UTF8*$/) {
|
$rawline !~ m/^$UTF8*$/) {
|
||||||
|
@ -1432,45 +1322,12 @@ sub process {
|
||||||
$rpt_cleaners = 1;
|
$rpt_cleaners = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
# check for Kconfig help text having a real description
|
|
||||||
# Only applies when adding the entry originally, after that we do not have
|
|
||||||
# sufficient context to determine whether it is indeed long enough.
|
|
||||||
if ($realfile =~ /Kconfig/ &&
|
|
||||||
$line =~ /\+\s*(?:---)?help(?:---)?$/) {
|
|
||||||
my $length = 0;
|
|
||||||
my $cnt = $realcnt;
|
|
||||||
my $ln = $linenr + 1;
|
|
||||||
my $f;
|
|
||||||
my $is_end = 0;
|
|
||||||
while ($cnt > 0 && defined $lines[$ln - 1]) {
|
|
||||||
$f = $lines[$ln - 1];
|
|
||||||
$cnt-- if ($lines[$ln - 1] !~ /^-/);
|
|
||||||
$is_end = $lines[$ln - 1] =~ /^\+/;
|
|
||||||
$ln++;
|
|
||||||
|
|
||||||
next if ($f =~ /^-/);
|
|
||||||
$f =~ s/^.//;
|
|
||||||
$f =~ s/#.*//;
|
|
||||||
$f =~ s/^\s+//;
|
|
||||||
next if ($f =~ /^$/);
|
|
||||||
if ($f =~ /^\s*config\s/) {
|
|
||||||
$is_end = 1;
|
|
||||||
last;
|
|
||||||
}
|
|
||||||
$length++;
|
|
||||||
}
|
|
||||||
WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_end && $length < 4);
|
|
||||||
#print "is_end<$is_end> length<$length>\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
# check we are in a valid source file if not then ignore this hunk
|
# check we are in a valid source file if not then ignore this hunk
|
||||||
next if ($realfile !~ /\.(h|c|cpp|s|S|pl|sh)$/);
|
next if ($realfile !~ /\.(h|c|cpp|s|S|pl|sh)$/);
|
||||||
|
|
||||||
#80 column limit
|
#80 column limit
|
||||||
if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
|
if ($line =~ /^\+/ &&
|
||||||
$rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
|
!($line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
|
||||||
!($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:,|\)\s*;)\s*$/ ||
|
|
||||||
$line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
|
|
||||||
$length > 80)
|
$length > 80)
|
||||||
{
|
{
|
||||||
WARN("line over 80 characters\n" . $herecurr);
|
WARN("line over 80 characters\n" . $herecurr);
|
||||||
|
@ -1486,18 +1343,6 @@ sub process {
|
||||||
WARN("adding a line without newline at end of file\n" . $herecurr);
|
WARN("adding a line without newline at end of file\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# Blackfin: use hi/lo macros
|
|
||||||
if ($realfile =~ m@arch/blackfin/.*\.S$@) {
|
|
||||||
if ($line =~ /\.[lL][[:space:]]*=.*&[[:space:]]*0x[fF][fF][fF][fF]/) {
|
|
||||||
my $herevet = "$here\n" . cat_vet($line) . "\n";
|
|
||||||
ERROR("use the LO() macro, not (... & 0xFFFF)\n" . $herevet);
|
|
||||||
}
|
|
||||||
if ($line =~ /\.[hH][[:space:]]*=.*>>[[:space:]]*16/) {
|
|
||||||
my $herevet = "$here\n" . cat_vet($line) . "\n";
|
|
||||||
ERROR("use the HI() macro, not (... >> 16)\n" . $herevet);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# check we are in a valid source file C or perl if not then ignore this hunk
|
# check we are in a valid source file C or perl if not then ignore this hunk
|
||||||
next if ($realfile !~ /\.(h|c|cpp|pl)$/);
|
next if ($realfile !~ /\.(h|c|cpp|pl)$/);
|
||||||
|
|
||||||
|
@ -1516,16 +1361,6 @@ sub process {
|
||||||
WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr);
|
WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# Blackfin: don't use __builtin_bfin_[cs]sync
|
|
||||||
if ($line =~ /__builtin_bfin_csync/) {
|
|
||||||
my $herevet = "$here\n" . cat_vet($line) . "\n";
|
|
||||||
ERROR("use the CSYNC() macro in asm/blackfin.h\n" . $herevet);
|
|
||||||
}
|
|
||||||
if ($line =~ /__builtin_bfin_ssync/) {
|
|
||||||
my $herevet = "$here\n" . cat_vet($line) . "\n";
|
|
||||||
ERROR("use the SSYNC() macro in asm/blackfin.h\n" . $herevet);
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check for potential 'bare' types
|
# Check for potential 'bare' types
|
||||||
my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
|
my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
|
||||||
$realline_next);
|
$realline_next);
|
||||||
|
@ -1809,50 +1644,6 @@ sub process {
|
||||||
$line =~ s@//.*@@;
|
$line =~ s@//.*@@;
|
||||||
$opline =~ s@//.*@@;
|
$opline =~ s@//.*@@;
|
||||||
|
|
||||||
# EXPORT_SYMBOL should immediately follow the thing it is exporting, consider
|
|
||||||
# the whole statement.
|
|
||||||
#print "APW <$lines[$realline_next - 1]>\n";
|
|
||||||
if (defined $realline_next &&
|
|
||||||
exists $lines[$realline_next - 1] &&
|
|
||||||
!defined $suppress_export{$realline_next} &&
|
|
||||||
($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
|
|
||||||
$lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
|
|
||||||
# Handle definitions which produce identifiers with
|
|
||||||
# a prefix:
|
|
||||||
# XXX(foo);
|
|
||||||
# EXPORT_SYMBOL(something_foo);
|
|
||||||
my $name = $1;
|
|
||||||
if ($stat =~ /^.([A-Z_]+)\s*\(\s*($Ident)/ &&
|
|
||||||
$name =~ /^${Ident}_$2/) {
|
|
||||||
#print "FOO C name<$name>\n";
|
|
||||||
$suppress_export{$realline_next} = 1;
|
|
||||||
|
|
||||||
} elsif ($stat !~ /(?:
|
|
||||||
\n.}\s*$|
|
|
||||||
^.DEFINE_$Ident\(\Q$name\E\)|
|
|
||||||
^.DECLARE_$Ident\(\Q$name\E\)|
|
|
||||||
^.LIST_HEAD\(\Q$name\E\)|
|
|
||||||
^.(?:$Storage\s+)?$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(|
|
|
||||||
\b\Q$name\E(?:\s+$Attribute)*\s*(?:;|=|\[|\()
|
|
||||||
)/x) {
|
|
||||||
#print "FOO A<$lines[$realline_next - 1]> stat<$stat> name<$name>\n";
|
|
||||||
$suppress_export{$realline_next} = 2;
|
|
||||||
} else {
|
|
||||||
$suppress_export{$realline_next} = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!defined $suppress_export{$linenr} &&
|
|
||||||
$prevline =~ /^.\s*$/ &&
|
|
||||||
($line =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
|
|
||||||
$line =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
|
|
||||||
#print "FOO B <$lines[$linenr - 1]>\n";
|
|
||||||
$suppress_export{$linenr} = 2;
|
|
||||||
}
|
|
||||||
if (defined $suppress_export{$linenr} &&
|
|
||||||
$suppress_export{$linenr} == 2) {
|
|
||||||
WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
|
|
||||||
}
|
|
||||||
|
|
||||||
# check for global initialisers.
|
# check for global initialisers.
|
||||||
if ($line =~ /^.$Type\s*$Ident\s*(?:\s+$Modifier)*\s*=\s*(0|NULL|false)\s*;/) {
|
if ($line =~ /^.$Type\s*$Ident\s*(?:\s+$Modifier)*\s*=\s*(0|NULL|false)\s*;/) {
|
||||||
ERROR("do not initialise globals to 0 or NULL\n" .
|
ERROR("do not initialise globals to 0 or NULL\n" .
|
||||||
|
@ -1900,40 +1691,6 @@ sub process {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# # no BUG() or BUG_ON()
|
|
||||||
# if ($line =~ /\b(BUG|BUG_ON)\b/) {
|
|
||||||
# print "Try to use WARN_ON & Recovery code rather than BUG() or BUG_ON()\n";
|
|
||||||
# print "$herecurr";
|
|
||||||
# $clean = 0;
|
|
||||||
# }
|
|
||||||
|
|
||||||
if ($line =~ /\bLINUX_VERSION_CODE\b/) {
|
|
||||||
WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
|
|
||||||
}
|
|
||||||
|
|
||||||
# printk should use KERN_* levels. Note that follow on printk's on the
|
|
||||||
# same line do not need a level, so we use the current block context
|
|
||||||
# to try and find and validate the current printk. In summary the current
|
|
||||||
# printk includes all preceding printk's which have no newline on the end.
|
|
||||||
# we assume the first bad printk is the one to report.
|
|
||||||
if ($line =~ /\bprintk\((?!KERN_)\s*"/) {
|
|
||||||
my $ok = 0;
|
|
||||||
for (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {
|
|
||||||
#print "CHECK<$lines[$ln - 1]\n";
|
|
||||||
# we have a preceding printk if it ends
|
|
||||||
# with "\n" ignore it, else it is to blame
|
|
||||||
if ($lines[$ln - 1] =~ m{\bprintk\(}) {
|
|
||||||
if ($rawlines[$ln - 1] !~ m{\\n"}) {
|
|
||||||
$ok = 1;
|
|
||||||
}
|
|
||||||
last;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ($ok == 0) {
|
|
||||||
WARN("printk() should include KERN_ facility level\n" . $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# function brace can't be on same line, except for #defines of do while,
|
# function brace can't be on same line, except for #defines of do while,
|
||||||
# or if closed on same line
|
# or if closed on same line
|
||||||
if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
|
if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
|
||||||
|
@ -1947,9 +1704,14 @@ sub process {
|
||||||
ERROR("open brace '{' following $1 go on the same line\n" . $hereprev);
|
ERROR("open brace '{' following $1 go on the same line\n" . $hereprev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# ... however, open braces on typedef lines should be avoided.
|
||||||
|
if ($line =~ /^.\s*typedef\s+(enum|union|struct)(?:\s+$Ident\b)?.*[^;]$/) {
|
||||||
|
ERROR("typedefs should be separate from struct declaration\n" . $herecurr);
|
||||||
|
}
|
||||||
|
|
||||||
# missing space after union, struct or enum definition
|
# missing space after union, struct or enum definition
|
||||||
if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) {
|
if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) {
|
||||||
WARN("missing space after $1 definition\n" . $herecurr);
|
ERROR("missing space after $1 definition\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# check for spacing round square brackets; allowed:
|
# check for spacing round square brackets; allowed:
|
||||||
|
@ -2190,26 +1952,6 @@ sub process {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# check for multiple assignments
|
|
||||||
if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
|
|
||||||
CHK("multiple assignments should be avoided\n" . $herecurr);
|
|
||||||
}
|
|
||||||
|
|
||||||
## # check for multiple declarations, allowing for a function declaration
|
|
||||||
## # continuation.
|
|
||||||
## if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
|
|
||||||
## $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
|
|
||||||
##
|
|
||||||
## # Remove any bracketed sections to ensure we do not
|
|
||||||
## # falsly report the parameters of functions.
|
|
||||||
## my $ln = $line;
|
|
||||||
## while ($ln =~ s/\([^\(\)]*\)//g) {
|
|
||||||
## }
|
|
||||||
## if ($ln =~ /,/) {
|
|
||||||
## WARN("declaring multiple variables together should be avoided\n" . $herecurr);
|
|
||||||
## }
|
|
||||||
## }
|
|
||||||
|
|
||||||
#need space before brace following if, while, etc
|
#need space before brace following if, while, etc
|
||||||
if (($line =~ /\(.*\){/ && $line !~ /\($Type\){/) ||
|
if (($line =~ /\(.*\){/ && $line !~ /\($Type\){/) ||
|
||||||
$line =~ /do{/) {
|
$line =~ /do{/) {
|
||||||
|
@ -2267,7 +2009,7 @@ sub process {
|
||||||
if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
|
if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
|
||||||
my $name = $1;
|
my $name = $1;
|
||||||
if ($name ne 'EOF' && $name ne 'ERROR') {
|
if ($name ne 'EOF' && $name ne 'ERROR') {
|
||||||
CHK("return of an errno should typically be -ve (return -$1)\n" . $herecurr);
|
WARN("return of an errno should typically be -ve (return -$1)\n" . $herecurr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2398,22 +2140,6 @@ sub process {
|
||||||
WARN("Whitepspace after \\ makes next lines useless\n" . $herecurr);
|
WARN("Whitepspace after \\ makes next lines useless\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
|
|
||||||
if ($tree && $rawline =~ m{^.\s*\#\s*include\s*\<asm\/(.*)\.h\>}) {
|
|
||||||
my $file = "$1.h";
|
|
||||||
my $checkfile = "include/linux/$file";
|
|
||||||
if (-f "$root/$checkfile" &&
|
|
||||||
$realfile ne $checkfile &&
|
|
||||||
$1 !~ /$allowed_asm_includes/)
|
|
||||||
{
|
|
||||||
if ($realfile =~ m{^arch/}) {
|
|
||||||
CHK("Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
|
|
||||||
} else {
|
|
||||||
WARN("Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# multi-statement macros should be enclosed in a do while loop, grab the
|
# multi-statement macros should be enclosed in a do while loop, grab the
|
||||||
# first statement and ensure its the whole macro if its not enclosed
|
# first statement and ensure its the whole macro if its not enclosed
|
||||||
# in a known good container
|
# in a known good container
|
||||||
|
@ -2508,15 +2234,6 @@ sub process {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...
|
|
||||||
# all assignments may have only one of the following with an assignment:
|
|
||||||
# .
|
|
||||||
# ALIGN(...)
|
|
||||||
# VMLINUX_SYMBOL(...)
|
|
||||||
if ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\s)$Ident\s*=|=\s*$Ident(?:\s|$))/) {
|
|
||||||
WARN("vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\n" . $herecurr);
|
|
||||||
}
|
|
||||||
|
|
||||||
# check for missing bracing round if etc
|
# check for missing bracing round if etc
|
||||||
if ($line =~ /(^.*)\bif\b/ && $line !~ /\#\s*if/) {
|
if ($line =~ /(^.*)\bif\b/ && $line !~ /\#\s*if/) {
|
||||||
my ($level, $endln, @chunks) =
|
my ($level, $endln, @chunks) =
|
||||||
|
@ -2644,64 +2361,23 @@ sub process {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# don't include deprecated include files (uses RAW line)
|
|
||||||
for my $inc (@dep_includes) {
|
|
||||||
if ($rawline =~ m@^.\s*\#\s*include\s*\<$inc>@) {
|
|
||||||
ERROR("Don't use <$inc>: see Documentation/feature-removal-schedule.txt\n" . $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# don't use deprecated functions
|
|
||||||
for my $func (@dep_functions) {
|
|
||||||
if ($line =~ /\b$func\b/) {
|
|
||||||
ERROR("Don't use $func(): see Documentation/feature-removal-schedule.txt\n" . $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# no volatiles please
|
# no volatiles please
|
||||||
my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
|
my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
|
||||||
if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
|
if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
|
||||||
WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
|
WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# SPIN_LOCK_UNLOCKED & RW_LOCK_UNLOCKED are deprecated
|
|
||||||
if ($line =~ /\b(SPIN_LOCK_UNLOCKED|RW_LOCK_UNLOCKED)/) {
|
|
||||||
ERROR("Use of $1 is deprecated: see Documentation/spinlocks.txt\n" . $herecurr);
|
|
||||||
}
|
|
||||||
|
|
||||||
# warn about #if 0
|
# warn about #if 0
|
||||||
if ($line =~ /^.\s*\#\s*if\s+0\b/) {
|
if ($line =~ /^.\s*\#\s*if\s+0\b/) {
|
||||||
CHK("if this code is redundant consider removing it\n" .
|
WARN("if this code is redundant consider removing it\n" .
|
||||||
$herecurr);
|
$herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# check for needless kfree() checks
|
# check for needless g_free() checks
|
||||||
if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
|
if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
|
||||||
my $expr = $1;
|
my $expr = $1;
|
||||||
if ($line =~ /\bkfree\(\Q$expr\E\);/) {
|
if ($line =~ /\bg_free\(\Q$expr\E\);/) {
|
||||||
WARN("kfree(NULL) is safe this check is probably not required\n" . $hereprev);
|
WARN("g_free(NULL) is safe this check is probably not required\n" . $hereprev);
|
||||||
}
|
|
||||||
}
|
|
||||||
# check for needless usb_free_urb() checks
|
|
||||||
if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
|
|
||||||
my $expr = $1;
|
|
||||||
if ($line =~ /\busb_free_urb\(\Q$expr\E\);/) {
|
|
||||||
WARN("usb_free_urb(NULL) is safe this check is probably not required\n" . $hereprev);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# prefer usleep_range over udelay
|
|
||||||
if ($line =~ /\budelay\s*\(\s*(\w+)\s*\)/) {
|
|
||||||
# ignore udelay's < 10, however
|
|
||||||
if (! (($1 =~ /(\d+)/) && ($1 < 10)) ) {
|
|
||||||
CHK("usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\n" . $line);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# warn about unexpectedly long msleep's
|
|
||||||
if ($line =~ /\bmsleep\s*\((\d+)\);/) {
|
|
||||||
if ($1 < 20) {
|
|
||||||
WARN("msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\n" . $line);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2716,24 +2392,17 @@ sub process {
|
||||||
if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) {
|
if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) {
|
||||||
ERROR("exactly one space required after that #$1\n" . $herecurr);
|
ERROR("exactly one space required after that #$1\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# check for spinlock_t definitions without a comment.
|
|
||||||
if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
|
|
||||||
$line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
|
|
||||||
my $which = $1;
|
|
||||||
if (!ctx_has_comment($first_line, $linenr)) {
|
|
||||||
CHK("$1 definition without comment\n" . $herecurr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
# check for memory barriers without a comment.
|
# check for memory barriers without a comment.
|
||||||
if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
|
if ($line =~ /\b(smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
|
||||||
if (!ctx_has_comment($first_line, $linenr)) {
|
if (!ctx_has_comment($first_line, $linenr)) {
|
||||||
CHK("memory barrier without comment\n" . $herecurr);
|
WARN("memory barrier without comment\n" . $herecurr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
# check of hardware specific defines
|
# check of hardware specific defines
|
||||||
if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
|
# we have e.g. CONFIG_LINUX and CONFIG_WIN32 for common cases
|
||||||
CHK("architecture specific defines should be avoided\n" . $herecurr);
|
# where they might be necessary.
|
||||||
|
if ($line =~ m@^.\s*\#\s*if.*\b__@) {
|
||||||
|
WARN("architecture specific defines should be avoided\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
# Check that the storage class is at the beginning of a declaration
|
# Check that the storage class is at the beginning of a declaration
|
||||||
|
@ -2748,11 +2417,6 @@ sub process {
|
||||||
ERROR("inline keyword should sit between storage class and type\n" . $herecurr);
|
ERROR("inline keyword should sit between storage class and type\n" . $herecurr);
|
||||||
}
|
}
|
||||||
|
|
||||||
-# Check for __inline__ and __inline, prefer inline
-        if ($line =~ /\b(__inline__|__inline)\b/) {
-            WARN("plain inline is preferred over $1\n" . $herecurr);
-        }

 # check for sizeof(&)
         if ($line =~ /\bsizeof\s*\(\s*\&/) {
             WARN("sizeof(& should be avoided\n" . $herecurr);
@@ -2785,98 +2449,55 @@ sub process {
             WARN("externs should be avoided in .c files\n" . $herecurr);
         }

-# checks for new __setup's
-        if ($rawline =~ /\b__setup\("([^"]*)"/) {
-            my $name = $1;
-            if (!grep(/$name/, @setup_docs)) {
-                CHK("__setup appears un-documented -- check Documentation/kernel-parameters.txt\n" . $herecurr);
+# check for pointless casting of g_malloc return
+        if ($line =~ /\*\s*\)\s*g_(try)?(m|re)alloc(0?)(_n)?\b/) {
+            if ($2 == 'm') {
+                WARN("unnecessary cast may hide bugs, use g_$1new$3 instead\n" . $herecurr);
+            } else {
+                WARN("unnecessary cast may hide bugs, use g_$1renew$3 instead\n" . $herecurr);
             }
         }

-# check for pointless casting of kmalloc return
-        if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) {
-            WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
-        }

 # check for gcc specific __FUNCTION__
         if ($line =~ /__FUNCTION__/) {
             WARN("__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr);
         }

-# check for semaphores used as mutexes
-        if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
-            WARN("mutexes are preferred for single holder semaphores\n" . $herecurr);
+# recommend qemu_strto* over strto*
+        if ($line =~ /\b(strto.*?)\s*\(/) {
+            WARN("consider using qemu_$1 in preference to $1\n" . $herecurr);
         }
-# check for semaphores used as mutexes
-        if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
-            WARN("consider using a completion\n" . $herecurr);
-        }
-# recommend strict_strto* over simple_strto*
-        if ($line =~ /\bsimple_(strto.*?)\s*\(/) {
-            WARN("consider using strict_$1 in preference to simple_$1\n" . $herecurr);
-        }
-# check for __initcall(), use device_initcall() explicitly please
-        if ($line =~ /^.\s*__initcall\s*\(/) {
-            WARN("please use device_initcall() instead of __initcall()\n" . $herecurr);
+# check for module_init(), use category-specific init macros explicitly please
+        if ($line =~ /^module_init\s*\(/) {
+            WARN("please use block_init(), type_init() etc. instead of module_init()\n" . $herecurr);
         }
 # check for various ops structs, ensure they are const.
-        my $struct_ops = qr{acpi_dock_ops|
-                address_space_operations|
-                backlight_ops|
-                block_device_operations|
-                dentry_operations|
-                dev_pm_ops|
-                dma_map_ops|
-                extent_io_ops|
-                file_lock_operations|
-                file_operations|
-                hv_ops|
-                ide_dma_ops|
-                intel_dvo_dev_ops|
-                item_operations|
-                iwl_ops|
-                kgdb_arch|
-                kgdb_io|
-                kset_uevent_ops|
-                lock_manager_operations|
-                microcode_ops|
-                mtrr_ops|
-                neigh_ops|
-                nlmsvc_binding|
-                pci_raw_ops|
-                pipe_buf_operations|
-                platform_hibernation_ops|
-                platform_suspend_ops|
-                proto_ops|
-                rpc_pipe_ops|
-                seq_operations|
-                snd_ac97_build_ops|
-                soc_pcmcia_socket_ops|
-                stacktrace_ops|
-                sysfs_ops|
-                tty_operations|
-                usb_mon_operations|
-                wd_ops}x;
+        my $struct_ops = qr{AIOCBInfo|
+                BdrvActionOps|
+                BlockDevOps|
+                BlockJobDriver|
+                DisplayChangeListenerOps|
+                GraphicHwOps|
+                IDEDMAOps|
+                KVMCapabilityInfo|
+                MemoryRegionIOMMUOps|
+                MemoryRegionOps|
+                MemoryRegionPortio|
+                QEMUFileOps|
+                SCSIBusInfo|
+                SCSIReqOps|
+                Spice[A-Z][a-zA-Z0-9]*Interface|
+                TPMDriverOps|
+                USBDesc[A-Z][a-zA-Z0-9]*|
+                VhostOps|
+                VMStateDescription|
+                VMStateInfo}x;
         if ($line !~ /\bconst\b/ &&
-            $line =~ /\bstruct\s+($struct_ops)\b/) {
+            $line =~ /\b($struct_ops)\b/) {
             WARN("struct $1 should normally be const\n" .
                 $herecurr);
         }

-# use of NR_CPUS is usually wrong
-# ignore definitions of NR_CPUS and usage to define arrays as likely right
-        if ($line =~ /\bNR_CPUS\b/ &&
-            $line !~ /^.\s*\s*#\s*if\b.*\bNR_CPUS\b/ &&
-            $line !~ /^.\s*\s*#\s*define\b.*\bNR_CPUS\b/ &&
-            $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ &&
-            $line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ &&
-            $line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/)
-        {
-            WARN("usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
-        }

 # check for %L{u,d,i} in strings
         my $string;
         while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
@@ -2888,25 +2509,6 @@ sub process {
             }
         }

-# whine mightly about in_atomic
-        if ($line =~ /\bin_atomic\s*\(/) {
-            if ($realfile =~ m@^drivers/@) {
-                ERROR("do not use in_atomic in drivers\n" . $herecurr);
-            } elsif ($realfile !~ m@^kernel/@) {
-                WARN("use of in_atomic() is incorrect outside core kernel code\n" . $herecurr);
-            }
-        }

-# check for lockdep_set_novalidate_class
-        if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
-            $line =~ /__lockdep_no_validate__\s*\)/ ) {
-            if ($realfile !~ m@^kernel/lockdep@ &&
-                $realfile !~ m@^include/linux/lockdep@ &&
-                $realfile !~ m@^drivers/base/core@) {
-                ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
-            }
-        }

 # QEMU specific tests
         if ($rawline =~ /\b(?:Qemu|QEmu)\b/) {
             WARN("use QEMU instead of Qemu or QEmu\n" . $herecurr);
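To make the intent of the new checkpatch rules concrete, here is a minimal C sketch of the kind of code they flag versus the style they recommend. This is illustration only; the variable names are invented, and only the glib and strto* calls are real APIs.

#include <glib.h>
#include <stdlib.h>

static void checkpatch_example(void)
{
    /* Flagged: casting the void * returned by g_malloc() is pointless
     * and can hide bugs. */
    char *flagged = (char *)g_malloc(16);

    /* Preferred: g_new()/g_renew() carry the element type and count. */
    char *preferred = g_new(char, 16);

    /* Flagged: plain strtol(); the new rule suggests qemu_strtol(),
     * which reports errors through its return value, e.g.
     * qemu_strtol("42", NULL, 10, &val). */
    long flagged_val = strtol("42", NULL, 10);
    (void)flagged_val;

    g_free(flagged);
    g_free(preferred);
}

The const-ops rule works the same way: a table of callbacks such as a MemoryRegionOps instance is expected to be declared "static const" unless there is a reason it cannot be.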
@@ -0,0 +1,119 @@
+/* Macro file for Coccinelle
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ * Authors:
+ *  Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or, at your
+ * option, any later version.  See the COPYING file in the top-level directory.
+ */
+
+/* Coccinelle only does limited parsing of headers, and chokes on some idioms
+ * defined in compiler.h and queue.h.  Macros that Coccinelle must know about
+ * in order to parse .c files must be in a separate macro file---which is
+ * exactly what you're staring at now.
+ *
+ * To use this file, add the "--macro-file scripts/cocci-macro-file.h" to the
+ * Coccinelle command line.
+ */
+
+/* From qemu/compiler.h */
+#define QEMU_GNUC_PREREQ(maj, min) 1
+#define QEMU_NORETURN __attribute__ ((__noreturn__))
+#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#define QEMU_SENTINEL __attribute__((sentinel))
+#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial))
+#define QEMU_PACKED __attribute__((gcc_struct, packed))
+
+#define cat(x,y) x ## y
+#define cat2(x,y) cat(x,y)
+#define QEMU_BUILD_BUG_ON(x) \
+    typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused));
+
+#define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m)))
+
+#define xglue(x, y) x ## y
+#define glue(x, y) xglue(x, y)
+#define stringify(s) tostring(s)
+#define tostring(s) #s
+
+#define typeof_field(type, field) typeof(((type *)0)->field)
+#define type_check(t1,t2) ((t1*)0 - (t2*)0)
+
+/* From qemu/queue.h */
+
+#define QLIST_HEAD(name, type) \
+struct name { \
+    struct type *lh_first;  /* first element */ \
+}
+
+#define QLIST_HEAD_INITIALIZER(head) \
+    { NULL }
+
+#define QLIST_ENTRY(type) \
+struct { \
+    struct type *le_next;   /* next element */ \
+    struct type **le_prev;  /* address of previous next element */ \
+}
+
+/*
+ * Singly-linked List definitions.
+ */
+#define QSLIST_HEAD(name, type) \
+struct name { \
+    struct type *slh_first; /* first element */ \
+}
+
+#define QSLIST_HEAD_INITIALIZER(head) \
+    { NULL }
+
+#define QSLIST_ENTRY(type) \
+struct { \
+    struct type *sle_next;  /* next element */ \
+}
+
+/*
+ * Simple queue definitions.
+ */
+#define QSIMPLEQ_HEAD(name, type) \
+struct name { \
+    struct type *sqh_first;  /* first element */ \
+    struct type **sqh_last;  /* addr of last next element */ \
+}
+
+#define QSIMPLEQ_HEAD_INITIALIZER(head) \
+    { NULL, &(head).sqh_first }
+
+#define QSIMPLEQ_ENTRY(type) \
+struct { \
+    struct type *sqe_next;  /* next element */ \
+}
+
+/*
+ * Tail queue definitions.
+ */
+#define Q_TAILQ_HEAD(name, type, qual) \
+struct name { \
+    qual type *tqh_first;       /* first element */ \
+    qual type *qual *tqh_last;  /* addr of last next element */ \
+}
+#define QTAILQ_HEAD(name, type) \
+struct name { \
+    type *tqh_first;   /* first element */ \
+    type **tqh_last;   /* addr of last next element */ \
+}
+
+#define QTAILQ_HEAD_INITIALIZER(head) \
+    { NULL, &(head).tqh_first }
+
+#define Q_TAILQ_ENTRY(type, qual) \
+struct { \
+    qual type *tqe_next;        /* next element */ \
+    qual type *qual *tqe_prev;  /* address of previous next element */\
+}
+#define QTAILQ_ENTRY(type) \
+struct { \
+    type *tqe_next;   /* next element */ \
+    type **tqe_prev;  /* address of previous next element */ \
+}
@@ -118,7 +118,7 @@ shape and this command should mostly work."""
     def qemu_get_ram_block(self, ram_addr):
         ram_blocks = gdb.parse_and_eval("ram_list.blocks")
         for block in self.qlist_foreach(ram_blocks, "next"):
-            if (ram_addr - block["offset"] < block["length"]):
+            if (ram_addr - block["offset"] < block["used_length"]):
                 return block
         raise gdb.GdbError("Bad ram offset %x" % ram_addr)
@@ -1318,6 +1318,9 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
 void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
 void cpu_set_fpuc(CPUX86State *env, uint16_t val);

+/* mem_helper.c */
+void helper_lock_init(void);
+
 /* svm_helper.c */
 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                    uint64_t param);
@@ -23,18 +23,37 @@

 /* broken thread support */

-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+#if defined(CONFIG_USER_ONLY)
+QemuMutex global_cpu_lock;

 void helper_lock(void)
 {
-    spin_lock(&global_cpu_lock);
+    qemu_mutex_lock(&global_cpu_lock);
 }

 void helper_unlock(void)
 {
-    spin_unlock(&global_cpu_lock);
+    qemu_mutex_unlock(&global_cpu_lock);
 }

+void helper_lock_init(void)
+{
+    qemu_mutex_init(&global_cpu_lock);
+}
+#else
+void helper_lock(void)
+{
+}
+
+void helper_unlock(void)
+{
+}
+
+void helper_lock_init(void)
+{
+}
+#endif
+
 void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
 {
     uint64_t d;
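The reason an explicit helper_lock_init() appears here is that a QemuMutex, unlike the old statically-initialized spinlock, has to be initialized at run time before its first use. A minimal sketch of the pattern, assuming QEMU's qemu/thread.h API (the names example_* are made up):

#include "qemu/thread.h"   /* QemuMutex, qemu_mutex_init/lock/unlock */

static QemuMutex example_lock;

static void example_init(void)          /* analogous to helper_lock_init() */
{
    qemu_mutex_init(&example_lock);
}

static void example_critical_section(void)  /* analogous to helper_lock()/helper_unlock() */
{
    qemu_mutex_lock(&example_lock);
    /* ... work guarded by the lock ... */
    qemu_mutex_unlock(&example_lock);
}

The init call has to happen once before any vCPU thread can reach helper_lock(), which is why the next hunk hooks it into optimize_flags_init().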
@@ -7898,6 +7898,8 @@ void optimize_flags_init(void)
                                          offsetof(CPUX86State, regs[i]),
                                          reg_names[i]);
     }
+
+    helper_lock_init();
 }

 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
@@ -1796,13 +1796,6 @@ static bool is_special_wait_psw(CPUState *cs)
     return cs->kvm_run->psw_addr == 0xfffUL;
 }

-static void guest_panicked(void)
-{
-    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
-                                   &error_abort);
-    vm_stop(RUN_STATE_GUEST_PANICKED);
-}
-
 static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
 {
     CPUState *cs = CPU(cpu);
@@ -1811,7 +1804,7 @@ static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
             str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
             ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
     s390_cpu_halt(cpu);
-    guest_panicked();
+    qemu_system_guest_panicked();
 }

 static int handle_intercept(S390CPU *cpu)
@@ -1844,7 +1837,7 @@ static int handle_intercept(S390CPU *cpu)
         if (is_special_wait_psw(cs)) {
             qemu_system_shutdown_request();
         } else {
-            guest_panicked();
+            qemu_system_guest_panicked();
         }
     }
     r = EXCP_HALTED;
@@ -595,6 +595,10 @@ void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 void tcg_pool_delete(TCGContext *s);

+void tb_lock(void);
+void tb_unlock(void);
+void tb_lock_reset(void);
+
 static inline void *tcg_malloc(int size)
 {
     TCGContext *s = &tcg_ctx;
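A short, purely illustrative sketch of the discipline these new declarations imply (not code from the series itself): tb_lock()/tb_unlock() are strictly paired around code that modifies translation-block structures, and tb_lock_reset() exists for error paths that may unwind with the lock still held, for example after a siglongjmp back into the CPU loop.

#include "tcg.h"   /* tb_lock(), tb_unlock(), tb_lock_reset(); path approximate */

static void modify_tb_structures_example(void)
{
    tb_lock();                 /* asserts the lock is not already held */
    /* ... touch the TB hash table / jump lists here ... */
    tb_unlock();
}

static void exception_unwind_example(void)
{
    /* After a longjmp the current thread may still own the TB lock;
     * tb_lock_reset() drops it only if it is held. */
    tb_lock_reset();
}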
tests/test-cutils.c: 1255 changed lines (diff suppressed because it is too large)
@@ -122,13 +122,45 @@ uintptr_t qemu_real_host_page_mask;
 uintptr_t qemu_host_page_size;
 uintptr_t qemu_host_page_mask;

-/* This is a multi-level map on the virtual address space.
-   The bottom level has pointers to PageDesc. */
+/* The bottom level has pointers to PageDesc */
 static void *l1_map[V_L1_SIZE];

 /* code generation context */
 TCGContext tcg_ctx;

+/* translation block context */
+#ifdef CONFIG_USER_ONLY
+__thread int have_tb_lock;
+#endif
+
+void tb_lock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(!have_tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    have_tb_lock++;
+#endif
+}
+
+void tb_unlock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(have_tb_lock);
+    have_tb_lock--;
+    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+#endif
+}
+
+void tb_lock_reset(void)
+{
+#ifdef CONFIG_USER_ONLY
+    if (have_tb_lock) {
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        have_tb_lock = 0;
+    }
+#endif
+}
+
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2);
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
@@ -139,11 +171,13 @@ void cpu_gen_init(void)
 }

 /* return non zero if the very first instruction is invalid so that
-   the virtual CPU can trigger an exception.
-   '*gen_code_size_ptr' contains the size of the generated code (host
-   code).
- */
+ * the virtual CPU can trigger an exception.
+ *
+ * '*gen_code_size_ptr' contains the size of the generated code (host
+ * code).
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
 {
     TCGContext *s = &tcg_ctx;
@@ -388,6 +422,9 @@ static void page_init(void)
 #endif
 }

+/* If alloc=1:
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 {
     PageDesc *pd;
@@ -399,26 +436,26 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)

     /* Level 2..N-1.  */
     for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
-        void **p = *lp;
+        void **p = atomic_rcu_read(lp);

         if (p == NULL) {
             if (!alloc) {
                 return NULL;
             }
             p = g_new0(void *, V_L2_SIZE);
-            *lp = p;
+            atomic_rcu_set(lp, p);
         }

         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
     }

-    pd = *lp;
+    pd = atomic_rcu_read(lp);
     if (pd == NULL) {
         if (!alloc) {
             return NULL;
         }
         pd = g_new0(PageDesc, V_L2_SIZE);
-        *lp = pd;
+        atomic_rcu_set(lp, pd);
     }

     return pd + (index & (V_L2_SIZE - 1));
@@ -429,11 +466,6 @@ static inline PageDesc *page_find(tb_page_addr_t index)
     return page_find_alloc(index, 0);
 }

-#if !defined(CONFIG_USER_ONLY)
-#define mmap_lock() do { } while (0)
-#define mmap_unlock() do { } while (0)
-#endif
-
 #if defined(CONFIG_USER_ONLY)
 /* Currently it is not recommended to allocate big chunks of data in
    user mode. It will change when a dedicated libc will be used. */
@@ -676,6 +708,7 @@ static inline void code_gen_alloc(size_t tb_size)
             CODE_GEN_AVG_BLOCK_SIZE;
     tcg_ctx.tb_ctx.tbs =
             g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
+    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
 }

 /* Must be called before using the QEMU cpus. 'tb_size' is the size
@@ -994,6 +1027,7 @@ static void build_page_bitmap(PageDesc *p)
     }
 }

+/* Called with mmap_lock held for user mode emulation. */
 TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
                               int flags, int cflags)
@@ -1041,6 +1075,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
  */
 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 {
@@ -1057,6 +1093,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
  */
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
@@ -1205,6 +1243,7 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
 }

 #if !defined(CONFIG_SOFTMMU)
+/* Called with mmap_lock held. */
 static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                     uintptr_t pc, void *puc,
                                     bool locked)
@@ -1274,7 +1313,10 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
 }
 #endif

-/* add the tb in the target page and protect it if necessary */
+/* add the tb in the target page and protect it if necessary
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static inline void tb_alloc_page(TranslationBlock *tb,
                                  unsigned int n, tb_page_addr_t page_addr)
 {
@@ -1330,16 +1372,16 @@ static inline void tb_alloc_page(TranslationBlock *tb,
 }

 /* add a new TB and link it to the physical page tables. phys_page2 is
-   (-1) to indicate that only one page contains the TB. */
+ * (-1) to indicate that only one page contains the TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2)
 {
     unsigned int h;
     TranslationBlock **ptb;

-    /* Grab the mmap lock to stop another thread invalidating this TB
-       before we are done.  */
-    mmap_lock();
     /* add in the physical hash table */
     h = tb_phys_hash_func(phys_pc);
     ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
@@ -1369,7 +1411,6 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 #ifdef DEBUG_TB_CHECK
     tb_page_check();
 #endif
-    mmap_unlock();
 }

 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
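Since tb_link_page() no longer takes the mmap lock itself, the "Called with mmap_lock held" comments above shift the responsibility to the callers. A hedged caller-side sketch of what that looks like for user-mode emulation (the function name and include paths below are assumptions for illustration; tb_gen_code()'s signature is the one shown in the diff):

#include "cpu.h"            /* CPUState, target_ulong; paths approximate */
#include "exec/exec-all.h"  /* mmap_lock(), mmap_unlock(), tb_gen_code() */
#include "tcg.h"            /* tb_lock(), tb_unlock() */

static TranslationBlock *generate_tb_example(CPUState *cpu, target_ulong pc,
                                             target_ulong cs_base,
                                             int flags, int cflags)
{
    TranslationBlock *tb;

    /* Lock ordering from this series: mmap_lock first, then tb_lock. */
    mmap_lock();
    tb_lock();
    tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
    tb_unlock();
    mmap_unlock();
    return tb;
}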
util/cutils.c: 150 changed lines
@@ -353,6 +353,156 @@ int64_t strtosz(const char *nptr, char **end)
     return strtosz_suffix(nptr, end, STRTOSZ_DEFSUFFIX_MB);
 }

+/**
+ * Helper function for qemu_strto*l() functions.
+ */
+static int check_strtox_error(const char *p, char *endptr, const char **next,
+                              int err)
+{
+    /* If no conversion was performed, prefer BSD behavior over glibc
+     * behavior.
+     */
+    if (err == 0 && endptr == p) {
+        err = EINVAL;
+    }
+    if (!next && *endptr) {
+        return -EINVAL;
+    }
+    if (next) {
+        *next = endptr;
+    }
+    return -err;
+}
+
+/**
+ * QEMU wrappers for the strtol(), strtoll(), strtoul(), strtoull() C functions.
+ *
+ * Convert ASCII string @nptr to a long integer value
+ * from the given @base.  Parameters @nptr, @endptr, @base
+ * follow the same semantics as the strtol() C function.
+ *
+ * Unlike strtol(), if @endptr is not NULL, this
+ * function will return -EINVAL whenever it cannot fully convert
+ * the string in @nptr with the given @base to a long.  This function returns
+ * the result of the conversion only through the @result parameter.
+ *
+ * If NULL is passed in @endptr, then the whole string in @nptr
+ * must be a number; otherwise it returns -EINVAL.
+ *
+ * RETURN VALUE
+ * Unlike strtol(), this wrapper returns either
+ * -EINVAL or the errno set by strtol() (e.g. -ERANGE).
+ * If the conversion overflows, -ERANGE is returned, and @result
+ * is set to the max value of the desired type
+ * (e.g. LONG_MAX, LLONG_MAX, ULONG_MAX, ULLONG_MAX).  In the case
+ * of underflow, -ERANGE is returned, and @result is set to the min
+ * value of the desired type.  For strtol(), strtoll(), @result is set to
+ * LONG_MIN, LLONG_MIN, respectively, and for strtoul(), strtoull() it
+ * is set to 0.
+ */
+int qemu_strtol(const char *nptr, const char **endptr, int base,
+                long *result)
+{
+    char *p;
+    int err = 0;
+    if (!nptr) {
+        if (endptr) {
+            *endptr = nptr;
+        }
+        err = -EINVAL;
+    } else {
+        errno = 0;
+        *result = strtol(nptr, &p, base);
+        err = check_strtox_error(nptr, p, endptr, errno);
+    }
+    return err;
+}
+
+/**
+ * Converts ASCII string to an unsigned long integer.
+ *
+ * If the string contains a negative number, the value will be converted to
+ * the unsigned representation of the signed value, unless the original
+ * (nonnegated) value would overflow; in that case, it will set @result
+ * to ULONG_MAX, and return ERANGE.
+ *
+ * The same behavior holds for qemu_strtoull(), but it sets @result to
+ * ULLONG_MAX instead of ULONG_MAX.
+ *
+ * See qemu_strtol() documentation for more info.
+ */
+int qemu_strtoul(const char *nptr, const char **endptr, int base,
+                 unsigned long *result)
+{
+    char *p;
+    int err = 0;
+    if (!nptr) {
+        if (endptr) {
+            *endptr = nptr;
+        }
+        err = -EINVAL;
+    } else {
+        errno = 0;
+        *result = strtoul(nptr, &p, base);
+        /* Windows returns 1 for negative out-of-range values.  */
+        if (errno == ERANGE) {
+            *result = -1;
+        }
+        err = check_strtox_error(nptr, p, endptr, errno);
+    }
+    return err;
+}
+
+/**
+ * Converts ASCII string to a long long integer.
+ *
+ * See qemu_strtol() documentation for more info.
+ */
+int qemu_strtoll(const char *nptr, const char **endptr, int base,
+                 int64_t *result)
+{
+    char *p;
+    int err = 0;
+    if (!nptr) {
+        if (endptr) {
+            *endptr = nptr;
+        }
+        err = -EINVAL;
+    } else {
+        errno = 0;
+        *result = strtoll(nptr, &p, base);
+        err = check_strtox_error(nptr, p, endptr, errno);
+    }
+    return err;
+}
+
+/**
+ * Converts ASCII string to an unsigned long long integer.
+ *
+ * See qemu_strtol() documentation for more info.
+ */
+int qemu_strtoull(const char *nptr, const char **endptr, int base,
+                  uint64_t *result)
+{
+    char *p;
+    int err = 0;
+    if (!nptr) {
+        if (endptr) {
+            *endptr = nptr;
+        }
+        err = -EINVAL;
+    } else {
+        errno = 0;
+        *result = strtoull(nptr, &p, base);
+        /* Windows returns 1 for negative out-of-range values.  */
+        if (errno == ERANGE) {
+            *result = -1;
+        }
+        err = check_strtox_error(nptr, p, endptr, errno);
+    }
+    return err;
+}
+
 /**
  * parse_uint:
  *
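A minimal usage sketch for the new wrappers, matching the qemu_strtol() signature added above. The function name parse_port and the header location are assumptions for illustration; errors come back as a negative errno value instead of requiring the caller to inspect errno:

#include "qemu-common.h"   /* declares qemu_strtol() at this point in time; location approximate */

static int parse_port(const char *str, long *port)
{
    const char *end;
    int ret = qemu_strtol(str, &end, 10, port);

    if (ret < 0) {
        /* -EINVAL: nothing converted; -ERANGE: overflow or underflow */
        return ret;
    }
    if (*end != '\0') {
        /* with a non-NULL endptr, trailing text is left for the caller to judge */
        return -1;
    }
    return 0;
}

Passing NULL instead of &end would make qemu_strtol() itself reject any trailing characters with -EINVAL.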
@@ -298,7 +298,16 @@ static inline void futex_wake(QemuEvent *ev, int n)

 static inline void futex_wait(QemuEvent *ev, unsigned val)
 {
-    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
+    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
+        switch (errno) {
+        case EWOULDBLOCK:
+            return;
+        case EINTR:
+            break; /* get out of switch and retry */
+        default:
+            abort();
+        }
+    }
 }
 #else
 static inline void futex_wake(QemuEvent *ev, int n)
@@ -335,6 +335,11 @@ static void rcu_init_unlock(void)
     qemu_mutex_unlock(&rcu_registry_lock);
     qemu_mutex_unlock(&rcu_sync_lock);
 }
+
+static void rcu_init_child(void)
+{
+    qemu_mutex_init(&rcu_registry_lock);
+}
 #endif

 void rcu_after_fork(void)
@@ -346,7 +351,7 @@ void rcu_after_fork(void)
 static void __attribute__((__constructor__)) rcu_init(void)
 {
 #ifdef CONFIG_POSIX
-    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
+    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
 #endif
     rcu_init_complete();
 }
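For context, this follows the standard pthread_atfork() prepare/parent/child convention: the prepare handler takes the lock, the parent handler releases it, and the child handler re-initializes the mutex rather than relying on whatever state it inherited across fork(). A self-contained sketch of that pattern, with invented names, not the QEMU code itself:

#include <pthread.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void atfork_prepare(void) { pthread_mutex_lock(&registry_lock); }
static void atfork_parent(void)  { pthread_mutex_unlock(&registry_lock); }
static void atfork_child(void)   { pthread_mutex_init(&registry_lock, NULL); }

static void install_atfork_handlers(void)
{
    /* prepare runs before fork(), parent in the parent afterwards,
     * child in the new child process */
    pthread_atfork(atfork_prepare, atfork_parent, atfork_child);
}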
vl.c: 6 changed lines
@@ -1745,6 +1745,12 @@ void qemu_system_reset(bool report)
     cpu_synchronize_all_post_reset();
 }

+void qemu_system_guest_panicked(void)
+{
+    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, &error_abort);
+    vm_stop(RUN_STATE_GUEST_PANICKED);
+}
+
 void qemu_system_reset_request(void)
 {
     if (no_reboot) {