main-loop: use qemu_mutex_lock_iothread consistently
The next patch will require the BQL to be always taken with
qemu_mutex_lock_iothread(), while right now this isn't the case.

Outside TCG mode this is not a problem. In TCG mode, we need to be
careful and avoid the "prod out of compiled code" step if already
in a VCPU thread. This is easily done with a check on current_cpu,
i.e. qemu_in_vcpu_thread().

Hopefully, multithreaded TCG will get rid of the whole logic to kick
VCPUs whenever an I/O event occurs!

Cc: Frederic Konrad <fred.konrad@greensocs.com>
Message-Id: <1434646046-27150-2-git-send-email-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
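The "check on current_cpu" mentioned above amounts to asking whether the caller is itself a VCPU thread. As a rough sketch (not part of this patch), assuming the per-thread current_cpu pointer and the existing qemu_cpu_is_self() helper, the predicate boils down to:

    /* Sketch only: approximately what qemu_in_vcpu_thread() checks.
     * current_cpu is a thread-local pointer to the CPU a VCPU thread is
     * running; it is NULL in the main loop and other non-VCPU threads. */
    bool qemu_in_vcpu_thread(void)
    {
        return current_cpu && qemu_cpu_is_self(current_cpu);
    }

With such a predicate, qemu_mutex_lock_iothread() can take the lock directly when called from a VCPU thread and skip the "prod out of compiled code" step, which is only needed when an I/O thread must interrupt a running VCPU.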
commit 2e7f7a3c86
parent bdf026317d
 cpus.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/cpus.c b/cpus.c
--- a/cpus.c
+++ b/cpus.c
@@ -954,7 +954,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     CPUState *cpu = arg;
     int r;
 
-    qemu_mutex_lock(&qemu_global_mutex);
+    qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->can_do_io = 1;
@@ -1034,10 +1034,10 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
 
+    qemu_mutex_lock_iothread();
     qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
 
-    qemu_mutex_lock(&qemu_global_mutex);
     CPU_FOREACH(cpu) {
         cpu->thread_id = qemu_get_thread_id();
         cpu->created = true;
@@ -1149,7 +1149,11 @@ bool qemu_in_vcpu_thread(void)
 void qemu_mutex_lock_iothread(void)
 {
     atomic_inc(&iothread_requesting_mutex);
-    if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
+    /* In the simple case there is no need to bump the VCPU thread out of
+     * TCG code execution.
+     */
+    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
+        !first_cpu || !first_cpu->thread) {
         qemu_mutex_lock(&qemu_global_mutex);
         atomic_dec(&iothread_requesting_mutex);
     } else {
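The effect on callers is that VCPU threads now take the BQL through the same wrapper as everything else, in preparation for the next patch requiring it. A hedged usage sketch (example_handle_io is an invented name, not from the patch):

    /* Illustrative only: callers bracket BQL-protected work with the
     * wrapper pair instead of touching qemu_global_mutex directly. */
    static void example_handle_io(void)
    {
        qemu_mutex_lock_iothread();
        /* ... access VM/device state protected by the BQL ... */
        qemu_mutex_unlock_iothread();
    }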