mirror of https://github.com/xemu-project/xemu.git
qemu: refactor main_loop (Marcelo Tosatti)
Break main loop into 3 main functions.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@7241 c046a42c-6fe2-441c-8c8c-71466251a162
parent 50317c7fa7
commit 43b9685807

1 changed file: vl.c | 298
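In outline, the refactor splits the old monolithic main_loop() into helpers that the new main_loop() composes: qemu_cpu_exec() wraps cpu_exec() together with the icount bookkeeping, qemu_calculate_timeout() decides how long main_loop_wait() may block, and vm_can_run() reports whether a shutdown, reset or powerdown request is pending. The toy program below only models that control flow; all of its functions and variables are simplified stand-ins invented for illustration, not code from this commit. The full vl.c diff follows.

#include <stdio.h>

/* Simplified stand-ins for the state the real loop consults. */
static int vm_running = 1;
static int shutdown_requested = 0;
static int timer_alarm_pending = 0;
static int iterations = 0;

static int vm_can_run(void)                   /* mirrors the new helper */
{
    return !shutdown_requested;
}

static int qemu_cpu_exec_stub(int cpu)        /* stand-in for qemu_cpu_exec(env) */
{
    printf("run cpu %d\n", cpu);
    return 0;                                 /* pretend no exception was raised */
}

static int qemu_calculate_timeout_stub(void)  /* stand-in, returns milliseconds */
{
    return vm_running ? 0 : 5000;
}

static void main_loop_wait_stub(int timeout_ms)
{
    printf("wait up to %d ms for I/O and timers\n", timeout_ms);
    if (++iterations == 3)                    /* end the toy run eventually */
        shutdown_requested = 1;
}

int main(void)
{
    int ret = 0;

    /* Same shape as the refactored main_loop(): run the CPUs round-robin,
     * then block in the I/O wait, until some request stops the inner loop. */
    do {
        for (int cpu = 0; cpu < 2; cpu++) {
            if (!vm_running || timer_alarm_pending)
                break;
            ret = qemu_cpu_exec_stub(cpu);
        }
        main_loop_wait_stub(qemu_calculate_timeout_stub());
    } while (ret == 0 && vm_can_run());

    printf("left the loop, handle shutdown/reset/powerdown here\n");
    return 0;
}

Compared with the old code, where the timeout computation and the request handling were inlined in one loop of roughly 150 lines, the split makes each policy separately readable.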
diff --git a/vl.c b/vl.c
--- a/vl.c
+++ b/vl.c
@@ -273,7 +273,7 @@ uint64_t node_cpumask[MAX_NODES];
 
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
 /* Conversion factor from emulated instructions to virtual clock ticks.  */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
@@ -1360,7 +1360,7 @@ static void host_alarm_handler(int host_signum)
             }
 #endif
         }
-        event_pending = 1;
+        timer_alarm_pending = 1;
         qemu_notify_event();
     }
 }
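The two hunks above are only the rename of the flag raised by host_alarm_handler() when a timer alarm fires; the big hunk below adds the new main_loop() that consumes it, dropping out of the CPU round-robin so the expired timers get serviced. As a generic illustration of that alarm-flag pattern, here is a minimal POSIX sketch that is not QEMU code; the handler, flag name and 100 ms interval are assumptions made for the example.

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

/* A flag written from the signal handler and polled by the main loop,
 * playing the role that timer_alarm_pending plays in vl.c. */
static volatile sig_atomic_t alarm_pending = 0;

static void on_alarm(int sig)
{
    (void)sig;
    alarm_pending = 1;
}

int main(void)
{
    struct sigaction sa = { .sa_handler = on_alarm };
    struct itimerval it = { .it_interval = { 0, 100000 },   /* 100 ms */
                            .it_value    = { 0, 100000 } };
    int served = 0;

    sigemptyset(&sa.sa_mask);
    sigaction(SIGALRM, &sa, NULL);
    setitimer(ITIMER_REAL, &it, NULL);

    while (served < 5) {
        /* "Guest work" would happen here; check the flag between units. */
        if (alarm_pending) {
            alarm_pending = 0;          /* consume it, as main_loop does */
            printf("alarm %d: stop emulating, service timers\n", ++served);
        }
        pause();                        /* wait for the next signal */
    }
    return 0;
}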
@@ -3879,153 +3879,175 @@ void main_loop_wait(int timeout)
 }
 
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
 {
-    int ret, timeout;
+    int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
 
+#ifdef CONFIG_PROFILER
+    ti = profile_getclock();
+#endif
+    if (use_icount) {
+        int64_t count;
+        int decr;
+        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+        env->icount_decr.u16.low = 0;
+        env->icount_extra = 0;
+        count = qemu_next_deadline();
+        count = (count + (1 << icount_time_shift) - 1)
+                >> icount_time_shift;
+        qemu_icount += count;
+        decr = (count > 0xffff) ? 0xffff : count;
+        count -= decr;
+        env->icount_decr.u16.low = decr;
+        env->icount_extra = count;
+    }
+    ret = cpu_exec(env);
+#ifdef CONFIG_PROFILER
+    qemu_time += profile_getclock() - ti;
+#endif
+    if (use_icount) {
+        /* Fold pending instructions back into the
+           instruction counter, and clear the interrupt flag.  */
+        qemu_icount -= (env->icount_decr.u16.low
+                        + env->icount_extra);
+        env->icount_decr.u32 = 0;
+        env->icount_extra = 0;
+    }
+    return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+    if (!env->halted)
+        return 1;
+    if (qemu_cpu_has_work(env))
+        return 1;
+    return 0;
+}
+
+static int tcg_has_work(void)
+{
     CPUState *env;
 
-    cur_cpu = first_cpu;
-    next_cpu = cur_cpu->next_cpu ?: first_cpu;
-    for(;;) {
-        if (vm_running) {
+    for (env = first_cpu; env != NULL; env = env->next_cpu)
+        if (cpu_has_work(env))
+            return 1;
+    return 0;
+}
 
-            for(;;) {
-                /* get next cpu */
-                env = next_cpu;
-#ifdef CONFIG_PROFILER
-                ti = profile_getclock();
-#endif
-                if (use_icount) {
-                    int64_t count;
-                    int decr;
-                    qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-                    env->icount_decr.u16.low = 0;
-                    env->icount_extra = 0;
-                    count = qemu_next_deadline();
-                    count = (count + (1 << icount_time_shift) - 1)
-                            >> icount_time_shift;
-                    qemu_icount += count;
-                    decr = (count > 0xffff) ? 0xffff : count;
-                    count -= decr;
-                    env->icount_decr.u16.low = decr;
-                    env->icount_extra = count;
-                }
-                ret = cpu_exec(env);
-#ifdef CONFIG_PROFILER
-                qemu_time += profile_getclock() - ti;
-#endif
-                if (use_icount) {
-                    /* Fold pending instructions back into the
-                       instruction counter, and clear the interrupt flag.  */
-                    qemu_icount -= (env->icount_decr.u16.low
-                                    + env->icount_extra);
-                    env->icount_decr.u32 = 0;
-                    env->icount_extra = 0;
-                }
-                next_cpu = env->next_cpu ?: first_cpu;
-                if (event_pending && likely(ret != EXCP_DEBUG)) {
-                    ret = EXCP_INTERRUPT;
-                    event_pending = 0;
-                    break;
-                }
-                if (ret == EXCP_HLT) {
-                    /* Give the next CPU a chance to run.  */
-                    cur_cpu = env;
-                    continue;
-                }
-                if (ret != EXCP_HALTED)
-                    break;
-                /* all CPUs are halted ? */
-                if (env == cur_cpu)
-                    break;
-            }
-            cur_cpu = env;
+static int qemu_calculate_timeout(void)
+{
+    int timeout;
 
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                if (no_shutdown) {
-                    vm_stop(0);
-                    no_shutdown = 0;
-                }
-                else
-                    break;
-            }
-            if (reset_requested) {
-                reset_requested = 0;
-                qemu_system_reset();
-                ret = EXCP_INTERRUPT;
-            }
-            if (powerdown_requested) {
-                powerdown_requested = 0;
-                qemu_system_powerdown();
-                ret = EXCP_INTERRUPT;
-            }
-            if (unlikely(ret == EXCP_DEBUG)) {
-                gdb_set_stop_cpu(cur_cpu);
-                vm_stop(EXCP_DEBUG);
-            }
-            /* If all cpus are halted then wait until the next IRQ */
-            /* XXX: use timeout computed from timers */
-            if (ret == EXCP_HALTED) {
-                if (use_icount) {
-                    int64_t add;
-                    int64_t delta;
-                    /* Advance virtual time to the next event.  */
-                    if (use_icount == 1) {
-                        /* When not using an adaptive execution frequency
-                           we tend to get badly out of sync with real time,
-                           so just delay for a reasonable amount of time.  */
-                        delta = 0;
-                    } else {
-                        delta = cpu_get_icount() - cpu_get_clock();
-                    }
-                    if (delta > 0) {
-                        /* If virtual time is ahead of real time then just
-                           wait for IO.  */
-                        timeout = (delta / 1000000) + 1;
-                    } else {
-                        /* Wait for either IO to occur or the next
-                           timer event.  */
-                        add = qemu_next_deadline();
-                        /* We advance the timer before checking for IO.
-                           Limit the amount we advance so that early IO
-                           activity won't get the guest too far ahead.  */
-                        if (add > 10000000)
-                            add = 10000000;
-                        delta += add;
-                        add = (add + (1 << icount_time_shift) - 1)
-                              >> icount_time_shift;
-                        qemu_icount += add;
-                        timeout = delta / 1000000;
-                        if (timeout < 0)
-                            timeout = 0;
-                    }
-                } else {
-                    timeout = 5000;
-                }
-            } else {
-                timeout = 0;
-            }
+    if (!vm_running)
+        timeout = 5000;
+    else if (tcg_has_work())
+        timeout = 0;
+    else if (!use_icount)
+        timeout = 5000;
+    else {
+        /* XXX: use timeout computed from timers */
+        int64_t add;
+        int64_t delta;
+        /* Advance virtual time to the next event.  */
+        if (use_icount == 1) {
+            /* When not using an adaptive execution frequency
+               we tend to get badly out of sync with real time,
+               so just delay for a reasonable amount of time.  */
+            delta = 0;
         } else {
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                break;
-            }
-            timeout = 5000;
+            delta = cpu_get_icount() - cpu_get_clock();
+        }
+        if (delta > 0) {
+            /* If virtual time is ahead of real time then just
+               wait for IO.  */
+            timeout = (delta / 1000000) + 1;
+        } else {
+            /* Wait for either IO to occur or the next
+               timer event.  */
+            add = qemu_next_deadline();
+            /* We advance the timer before checking for IO.
+               Limit the amount we advance so that early IO
+               activity won't get the guest too far ahead.  */
+            if (add > 10000000)
+                add = 10000000;
+            delta += add;
+            add = (add + (1 << icount_time_shift) - 1)
+                  >> icount_time_shift;
+            qemu_icount += add;
+            timeout = delta / 1000000;
+            if (timeout < 0)
+                timeout = 0;
         }
-#ifdef CONFIG_PROFILER
-        ti = profile_getclock();
-#endif
-        main_loop_wait(timeout);
-#ifdef CONFIG_PROFILER
-        dev_time += profile_getclock() - ti;
-#endif
     }
-    cpu_disable_ticks();
-    return ret;
+
+    return timeout;
+}
+
+static int vm_can_run(void)
+{
+    if (powerdown_requested)
+        return 0;
+    if (reset_requested)
+        return 0;
+    if (shutdown_requested)
+        return 0;
+    return 1;
+}
+
+static void main_loop(void)
+{
+    int ret = 0;
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+    for (;;) {
+        do {
+            if (next_cpu == NULL)
+                next_cpu = first_cpu;
+            for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+                CPUState *env = cur_cpu = next_cpu;
+
+                if (!vm_running)
+                    break;
+                if (timer_alarm_pending) {
+                    timer_alarm_pending = 0;
+                    break;
+                }
+                ret = qemu_cpu_exec(env);
+                if (ret == EXCP_DEBUG) {
+                    gdb_set_stop_cpu(env);
+                    break;
+                }
+            }
+#ifdef CONFIG_PROFILER
+            ti = profile_getclock();
+#endif
+            main_loop_wait(qemu_calculate_timeout());
+#ifdef CONFIG_PROFILER
+            dev_time += profile_getclock() - ti;
+#endif
+        } while (ret != EXCP_DEBUG && vm_can_run());
+
+        if (ret == EXCP_DEBUG)
+            vm_stop(EXCP_DEBUG);
+
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown) {
+                vm_stop(0);
+                no_shutdown = 0;
+            } else
+                break;
+        }
+        if (qemu_reset_requested())
+            qemu_system_reset();
+        if (qemu_powerdown_requested())
+            qemu_system_powerdown();
+    }
 }
 
 static void version(void)
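Both qemu_cpu_exec() and qemu_calculate_timeout() in the hunk above lean on the same icount conversions: one executed guest instruction accounts for 2^icount_time_shift ns of virtual time, so a nanosecond deadline from qemu_next_deadline() becomes an instruction budget via the rounding-up shift (count + (1 << icount_time_shift) - 1) >> icount_time_shift, the budget is split into the 16-bit icount_decr.u16.low field plus icount_extra, and a nanosecond lead of virtual time over real time becomes a millisecond-scale timeout via delta / 1000000. The short program below only works those formulas through with made-up numbers; the shift and deadline values are arbitrary assumptions, and nothing in it comes from QEMU itself.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Arbitrary example values, not taken from a real run. */
    int     icount_time_shift = 3;        /* 1 insn == 2^3 = 8 ns of virtual time */
    int64_t deadline_ns       = 1000005;  /* ns until the next virtual timer */

    /* Round the deadline up to whole instructions (ceiling division by 2^shift),
       as done before calling cpu_exec(). */
    int64_t count = (deadline_ns + (1 << icount_time_shift) - 1)
                    >> icount_time_shift;
    printf("instruction budget: %lld\n", (long long)count);   /* 125001 */

    /* Split the budget into the 16-bit decrementer plus the overflow part. */
    int     decr  = (count > 0xffff) ? 0xffff : (int)count;
    int64_t extra = count - decr;
    printf("icount_decr.u16.low = %d, icount_extra = %lld\n",
           decr, (long long)extra);                            /* 65535, 59466 */

    /* A virtual-time lead over real time, in ns, becomes a poll timeout in ms. */
    int64_t delta_ns = 7500000;           /* guest is 7.5 ms ahead of real time */
    int     timeout  = (int)(delta_ns / 1000000) + 1;
    printf("main_loop_wait timeout: %d ms\n", timeout);        /* 8 */

    return 0;
}

Rounding the budget up rather than down means the CPU always executes at least far enough to reach the deadline, so the virtual clock cannot stall just short of the next timer event.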