linux-user: Use QemuMutex and QemuCond

Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond.
This will allow making some locks and condition variables common
between user and system mode emulation.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-7-git-send-email-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sergey Fedorov, 2016-08-02 18:27:37 +01:00; committed by Paolo Bonzini
commit 959f593c0e (parent a5403c69fc)
1 changed file (linux-user/main.c) with 32 additions and 23 deletions
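
Background note on the conversion described above: the qemu/thread.h
primitives mirror the pthread calls one for one, but QemuMutex and QemuCond
have no static initializers, which is why the patch introduces
qemu_init_cpu_loop() and calls it early in main(). A minimal sketch of the
mapping; the flag/flag_lock/flag_cond names are hypothetical, used only for
illustration, and not part of the patch:

#include "qemu/osdep.h"
#include "qemu/thread.h"

/* Hypothetical example showing the pthread -> QEMU primitive mapping. */
static QemuMutex flag_lock; /* was: pthread_mutex_t + PTHREAD_MUTEX_INITIALIZER */
static QemuCond flag_cond;  /* was: pthread_cond_t + PTHREAD_COND_INITIALIZER */
static bool flag;

static void flag_init(void)
{
    qemu_mutex_init(&flag_lock); /* was: pthread_mutex_init(&m, NULL) */
    qemu_cond_init(&flag_cond);  /* was: pthread_cond_init(&c, NULL) */
}

static void flag_wait(void)
{
    qemu_mutex_lock(&flag_lock);
    while (!flag) { /* re-check the predicate: wakeups may be spurious */
        qemu_cond_wait(&flag_cond, &flag_lock);
    }
    qemu_mutex_unlock(&flag_lock);
}

static void flag_set(void)
{
    qemu_mutex_lock(&flag_lock);
    flag = true;
    qemu_cond_broadcast(&flag_cond); /* was: pthread_cond_broadcast() */
    qemu_mutex_unlock(&flag_lock);
}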


@@ -111,17 +111,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
    We don't require a full sync, only that no cpus are executing guest code.
    The alternative is to map target atomic ops onto host equivalents,
    which requires quite a lot of per host/target work. */
-static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static QemuMutex cpu_list_lock;
+static QemuMutex exclusive_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
 static int pending_cpus;
 
+void qemu_init_cpu_loop(void)
+{
+    qemu_mutex_init(&cpu_list_lock);
+    qemu_mutex_init(&exclusive_lock);
+    qemu_cond_init(&exclusive_cond);
+    qemu_cond_init(&exclusive_resume);
+}
+
 /* Make sure everything is in a consistent state for calling fork(). */
 void fork_start(void)
 {
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
 
@@ -138,14 +146,14 @@ void fork_end(int child)
             }
         }
         pending_cpus = 0;
-        pthread_mutex_init(&exclusive_lock, NULL);
-        pthread_mutex_init(&cpu_list_mutex, NULL);
-        pthread_cond_init(&exclusive_cond, NULL);
-        pthread_cond_init(&exclusive_resume, NULL);
+        qemu_mutex_init(&exclusive_lock);
+        qemu_mutex_init(&cpu_list_lock);
+        qemu_cond_init(&exclusive_cond);
+        qemu_cond_init(&exclusive_resume);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
-        pthread_mutex_unlock(&exclusive_lock);
+        qemu_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
@@ -155,7 +163,7 @@ void fork_end(int child)
 static inline void exclusive_idle(void)
 {
     while (pending_cpus) {
-        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
     }
 }
 
@@ -165,7 +173,7 @@ static inline void start_exclusive(void)
 {
     CPUState *other_cpu;
 
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
 
     pending_cpus = 1;
@@ -176,8 +184,8 @@ static inline void start_exclusive(void)
             cpu_exit(other_cpu);
         }
     }
-    if (pending_cpus > 1) {
-        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+    while (pending_cpus > 1) {
+        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
     }
 }
 
@@ -185,42 +193,42 @@ static inline void start_exclusive(void)
 static inline void __attribute__((unused)) end_exclusive(void)
 {
     pending_cpus = 0;
-    pthread_cond_broadcast(&exclusive_resume);
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_cond_broadcast(&exclusive_resume);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Wait for exclusive ops to finish, and begin cpu execution. */
 static inline void cpu_exec_start(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
     cpu->running = true;
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Mark cpu as not executing, and release pending exclusive ops. */
 static inline void cpu_exec_end(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     cpu->running = false;
     if (pending_cpus > 1) {
         pending_cpus--;
         if (pending_cpus == 1) {
-            pthread_cond_signal(&exclusive_cond);
+            qemu_cond_signal(&exclusive_cond);
         }
     }
     exclusive_idle();
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 void cpu_list_lock(void)
 {
-    pthread_mutex_lock(&cpu_list_mutex);
+    qemu_mutex_lock(&cpu_list_lock);
 }
 
 void cpu_list_unlock(void)
 {
-    pthread_mutex_unlock(&cpu_list_mutex);
+    qemu_mutex_unlock(&cpu_list_lock);
 }
 
@@ -4211,6 +4219,7 @@ int main(int argc, char **argv, char **envp)
     int ret;
     int execfd;
 
+    qemu_init_cpu_loop();
     module_call_init(MODULE_INIT_QOM);
 
     if ((envlist = envlist_create()) == NULL) {
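
Two points in the diff above go beyond mechanical renaming. First,
fork_end() re-initializes the locks in the child instead of unlocking them:
only the forking thread survives fork(), so re-initializing is the safe way
to return the child's copies to a known state. Second, start_exclusive()
now waits in a while loop rather than a single if: qemu_cond_wait(), like
pthread_cond_wait(), may return spuriously, so the pending_cpus > 1
predicate must be re-checked after every wakeup. A condensed view of the
function as it stands after the patch, reconstructed from the hunks above:

/* Wait until all other CPUs have stopped executing guest code.
 * pending_cpus counts this CPU plus every CPU still running; each CPU
 * leaving the guest decrements it, and the last one out signals
 * exclusive_cond (see cpu_exec_end() in the diff). */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    qemu_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing. */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    while (pending_cpus > 1) { /* was "if" before this patch */
        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

Note that exclusive_lock stays held when start_exclusive() returns;
end_exclusive() releases it and broadcasts exclusive_resume so that CPUs
blocked in exclusive_idle() can continue.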