migration: cleanup stats update into function
We have quite a few lines in migration_thread() that calculate some statistics for the migration iterations. Isolate them into a single function to improve readability.

Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
commit b15df1ae50 (parent 39b9e17905)
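The arithmetic below is moved, not changed. As a reading aid, here is a minimal, self-contained C sketch of what one accounting window computes; the numbers are made up for illustration, the variable names are simplified stand-ins for the MigrationState fields touched by this patch, and the 100 ms window mirrors migration.c's BUFFER_DELAY.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative values for one 100 ms accounting window. */
    uint64_t transferred    = 12 * 1024 * 1024; /* bytes written this window       */
    uint64_t time_spent     = 100;              /* ms, i.e. one BUFFER_DELAY       */
    uint64_t downtime_limit = 300;              /* ms, from migrate-set-parameters */

    /* Bandwidth is tracked in bytes per millisecond ... */
    double bandwidth = (double)transferred / time_spent;

    /* ... so bandwidth * allowed downtime (ms) is the amount of outstanding
     * data that could still be flushed within that downtime: the threshold
     * below which the final migration stage may be entered. */
    double threshold_size = bandwidth * downtime_limit;

    /* The user-visible rate converts bytes/ms into megabits per second. */
    double mbps = ((double)transferred * 8.0) /
                  ((double)time_spent / 1000.0) / 1000.0 / 1000.0;

    printf("bandwidth:      %.0f bytes/ms\n", bandwidth);
    printf("threshold_size: %.0f bytes\n", threshold_size);
    printf("mbps:           %.2f\n", mbps);
    return 0;
}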
migration/migration.c

@@ -1303,6 +1303,8 @@ MigrationState *migrate_init(void)
     s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     s->total_time = 0;
     s->vm_was_running = false;
+    s->iteration_initial_bytes = 0;
+    s->threshold_size = 0;
     return s;
 }
 
@@ -2209,6 +2211,43 @@ static void migration_calculate_complete(MigrationState *s)
     }
 }
 
+static void migration_update_counters(MigrationState *s,
+                                       int64_t current_time)
+{
+    uint64_t transferred, time_spent;
+    int64_t threshold_size;
+    double bandwidth;
+
+    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
+        return;
+    }
+
+    transferred = qemu_ftell(s->to_dst_file) - s->iteration_initial_bytes;
+    time_spent = current_time - s->iteration_start_time;
+    bandwidth = (double)transferred / time_spent;
+    threshold_size = bandwidth * s->parameters.downtime_limit;
+
+    s->mbps = (((double) transferred * 8.0) /
+               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
+
+    /*
+     * if we haven't sent anything, we don't want to
+     * recalculate. 10000 is a small enough number for our purposes
+     */
+    if (ram_counters.dirty_pages_rate && transferred > 10000) {
+        s->expected_downtime = ram_counters.dirty_pages_rate *
+            qemu_target_page_size() / bandwidth;
+    }
+
+    qemu_file_reset_rate_limit(s->to_dst_file);
+
+    s->iteration_start_time = current_time;
+    s->iteration_initial_bytes = qemu_ftell(s->to_dst_file);
+
+    trace_migrate_transferred(transferred, time_spent,
+                              bandwidth, threshold_size);
+}
+
 /*
  * Master migration thread on the source VM.
  * It drives the migration and pumps the data down the outgoing channel.
@@ -2216,22 +2255,15 @@ static void migration_calculate_complete(MigrationState *s)
 static void *migration_thread(void *opaque)
 {
     MigrationState *s = opaque;
-    /* Used by the bandwidth calcs, updated later */
-    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
-    int64_t initial_bytes = 0;
-    /*
-     * The final stage happens when the remaining data is smaller than
-     * this threshold; it's calculated from the requested downtime and
-     * measured bandwidth
-     */
-    int64_t threshold_size = 0;
     bool entered_postcopy = false;
     /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
     enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
 
     rcu_register_thread();
 
+    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
     qemu_savevm_state_header(s->to_dst_file);
 
     /*
@@ -2271,17 +2303,17 @@ static void *migration_thread(void *opaque)
         if (!qemu_file_rate_limit(s->to_dst_file)) {
             uint64_t pend_post, pend_nonpost;
 
-            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
+            qemu_savevm_state_pending(s->to_dst_file, s->threshold_size,
                                       &pend_nonpost, &pend_post);
             pending_size = pend_nonpost + pend_post;
-            trace_migrate_pending(pending_size, threshold_size,
+            trace_migrate_pending(pending_size, s->threshold_size,
                                   pend_post, pend_nonpost);
-            if (pending_size && pending_size >= threshold_size) {
+            if (pending_size && pending_size >= s->threshold_size) {
                 /* Still a significant amount to transfer */
 
                 if (migrate_postcopy() &&
                     s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
-                    pend_nonpost <= threshold_size &&
+                    pend_nonpost <= s->threshold_size &&
                     atomic_read(&s->start_postcopy)) {
 
                     if (!postcopy_start(s)) {
@@ -2306,33 +2338,15 @@ static void *migration_thread(void *opaque)
             trace_migration_thread_file_err();
             break;
         }
 
         current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-        if (current_time >= initial_time + BUFFER_DELAY) {
-            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
-                                         initial_bytes;
-            uint64_t time_spent = current_time - initial_time;
-            double bandwidth = (double)transferred_bytes / time_spent;
-            threshold_size = bandwidth * s->parameters.downtime_limit;
 
-            s->mbps = (((double) transferred_bytes * 8.0) /
-                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
+        migration_update_counters(s, current_time);
 
-            trace_migrate_transferred(transferred_bytes, time_spent,
-                                      bandwidth, threshold_size);
-            /* if we haven't sent anything, we don't want to recalculate
-               10000 is a small enough number for our purposes */
-            if (ram_counters.dirty_pages_rate && transferred_bytes > 10000) {
-                s->expected_downtime = ram_counters.dirty_pages_rate *
-                    qemu_target_page_size() / bandwidth;
-            }
-
-            qemu_file_reset_rate_limit(s->to_dst_file);
-            initial_time = current_time;
-            initial_bytes = qemu_ftell(s->to_dst_file);
-        }
         if (qemu_file_rate_limit(s->to_dst_file)) {
             /* usleep expects microseconds */
-            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
+            g_usleep((s->iteration_start_time + BUFFER_DELAY -
+                      current_time) * 1000);
         }
     }
 
migration/migration.h

@@ -102,6 +102,17 @@ struct MigrationState
     QEMUBH *cleanup_bh;
     QEMUFile *to_dst_file;
 
+    /* bytes already send at the beggining of current interation */
+    uint64_t iteration_initial_bytes;
+    /* time at the start of current iteration */
+    int64_t iteration_start_time;
+    /*
+     * The final stage happens when the remaining data is smaller than
+     * this threshold; it's calculated from the requested downtime and
+     * measured bandwidth
+     */
+    int64_t threshold_size;
+
     /* params from 'migrate-set-parameters' */
     MigrationParameters parameters;
 
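One line carried over verbatim into the new helper is the expected_downtime estimate. The following standalone sketch (illustrative numbers only, not QEMU code) walks through its unit reasoning under the assumption that dirty_pages_rate is pages per second and bandwidth is bytes per millisecond, as in the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative inputs, not values from a real migration. */
    uint64_t dirty_pages_rate = 2000;     /* pages dirtied per second           */
    uint64_t page_size        = 4096;     /* bytes, cf. qemu_target_page_size() */
    double   bandwidth        = 131072.0; /* bytes per millisecond (125 MiB/s)  */

    /* Bytes dirtied per second divided by bytes drained per millisecond:
     * the milliseconds needed to resend one second's worth of newly
     * dirtied memory, i.e. a rough estimate of the remaining downtime. */
    double expected_downtime = dirty_pages_rate * page_size / bandwidth;

    printf("expected_downtime: %.1f ms\n", expected_downtime);
    return 0;
}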