author     Dr. David Alan Gilbert <dgilbert@redhat.com>  2018-06-13 11:26:41 +0100
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>  2018-06-15 14:40:56 +0100
commit     ad767bed5af559ed32c6dabdcaccc3f75731e1f4 (patch)
tree       e29268c63f734e0d24788e5f1356d72647ab9d24 /migration/migration.c
parent     7e555c6c58f095c9286814c3f9cd06196feba8a1 (diff)
migration: Wake rate limiting for urgent requests
Rate limiting sleeps the migration thread for a while when it runs out of bandwidth; but sometimes we want to wake up to get on with something more urgent (like a postcopy request).

Here we use a semaphore with a timedwait instead of a simple sleep; incrementing the semaphore will wake it up sooner. Anything that consumes these urgent events must decrement the semaphore.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20180613102642.23995-3-dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
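The mechanism boils down to sleeping on a semaphore with a timeout instead of calling g_usleep(), so that an urgent poster can cut the sleep short while the consumer later eats the posted count. The standalone sketch below illustrates that pattern with POSIX semaphores (sem_timedwait) rather than QEMU's qemu_sem_* wrappers so it builds on its own; the names urgent_request(), ratelimit_wait() and limiter_thread() are illustrative only, not QEMU API.

/*
 * Standalone sketch of the wake-for-urgent pattern, using POSIX
 * semaphores instead of QEMU's qemu_sem_* wrappers so it builds on
 * its own:  gcc -O2 -pthread wake_sketch.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static sem_t rate_limit_sem;

/* Analogue of migration_make_urgent_request(): cut the sleep short. */
static void urgent_request(void)
{
    sem_post(&rate_limit_sem);
}

/* Sleep for up to @ms milliseconds; return true if woken early. */
static bool ratelimit_wait(int ms)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);   /* sem_timedwait wants an absolute time */
    ts.tv_sec += ms / 1000;
    ts.tv_nsec += (long)(ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }
    if (sem_timedwait(&rate_limit_sem, &ts) == 0) {
        /* Woken early: hand the token back, as the patch does, so the
         * consumer of the urgent event still sees it on the semaphore. */
        sem_post(&rate_limit_sem);
        return true;
    }
    return false;   /* the full delay elapsed (or the wait was interrupted) */
}

static void *limiter_thread(void *opaque)
{
    (void)opaque;
    for (int i = 0; i < 3; i++) {
        bool urgent = ratelimit_wait(500);
        printf("iteration %d: %s\n", i, urgent ? "woken early" : "timed out");
        if (urgent) {
            sem_wait(&rate_limit_sem);    /* consume the urgent event */
        }
    }
    return NULL;
}

int main(void)
{
    pthread_t thread;
    struct timespec delay = { 0, 200 * 1000000L };   /* 200ms */

    sem_init(&rate_limit_sem, 0, 0);
    pthread_create(&thread, NULL, limiter_thread, NULL);
    nanosleep(&delay, NULL);              /* ...then raise an urgent request */
    urgent_request();
    pthread_join(&thread, NULL);
    sem_destroy(&rate_limit_sem);
    return 0;
}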
Diffstat (limited to 'migration/migration.c')
-rw-r--r--  migration/migration.c  35
1 file changed, 31 insertions, 4 deletions
diff --git a/migration/migration.c b/migration/migration.c
index 3a50d4c35c..108c3d7142 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2852,6 +2852,16 @@ static void migration_iteration_finish(MigrationState *s)
qemu_mutex_unlock_iothread();
}
+void migration_make_urgent_request(void)
+{
+ qemu_sem_post(&migrate_get_current()->rate_limit_sem);
+}
+
+void migration_consume_urgent_request(void)
+{
+ qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
+}
+
/*
* Master migration thread on the source VM.
* It drives the migration and pumps the data down the outgoing channel.
@@ -2861,6 +2871,7 @@ static void *migration_thread(void *opaque)
MigrationState *s = opaque;
int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
MigThrError thr_error;
+ bool urgent = false;
rcu_register_thread();
@@ -2901,7 +2912,7 @@ static void *migration_thread(void *opaque)
s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
int64_t current_time;
- if (!qemu_file_rate_limit(s->to_dst_file)) {
+ if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
MigIterateState iter_state = migration_iteration_run(s);
if (iter_state == MIG_ITERATE_SKIP) {
continue;
@@ -2932,10 +2943,24 @@ static void *migration_thread(void *opaque)
migration_update_counters(s, current_time);
+ urgent = false;
if (qemu_file_rate_limit(s->to_dst_file)) {
- /* usleep expects microseconds */
- g_usleep((s->iteration_start_time + BUFFER_DELAY -
- current_time) * 1000);
+ /* Wait for the rate-limiting delay to expire, OR for
+ * something urgent to post the semaphore.
+ */
+ int ms = s->iteration_start_time + BUFFER_DELAY - current_time;
+ trace_migration_thread_ratelimit_pre(ms);
+ if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
+ /* We were woken by one or more urgent things, but
+ * the timedwait will have consumed one of them.
+ * The service routine for the urgent wake will decrement
+ * the semaphore itself for each item it consumes,
+ * so add back the one we just ate here.
+ */
+ qemu_sem_post(&s->rate_limit_sem);
+ urgent = true;
+ }
+ trace_migration_thread_ratelimit_post(urgent);
}
}
@@ -3109,6 +3134,7 @@ static void migration_instance_finalize(Object *obj)
qemu_mutex_destroy(&ms->qemu_file_lock);
g_free(params->tls_hostname);
g_free(params->tls_creds);
+ qemu_sem_destroy(&ms->rate_limit_sem);
qemu_sem_destroy(&ms->pause_sem);
qemu_sem_destroy(&ms->postcopy_pause_sem);
qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
@@ -3147,6 +3173,7 @@ static void migration_instance_init(Object *obj)
qemu_sem_init(&ms->postcopy_pause_sem, 0);
qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
qemu_sem_init(&ms->rp_state.rp_sem, 0);
+ qemu_sem_init(&ms->rate_limit_sem, 0);
qemu_mutex_init(&ms->qemu_file_lock);
}
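For completeness, a hedged sketch of how producers and consumers of urgent events are expected to pair up around the two new helpers. The callers below (handle_urgent_page_request, service_urgent_work) are hypothetical placeholders, not functions added by this patch; real users are wired up separately, and the prototypes for the helpers live in a migration header not shown in this diff.

/* Hypothetical callers: not part of this patch, shown only to illustrate
 * the post/consume pairing the commit message describes. */
static void handle_urgent_page_request(void)
{
    /* Producer side: after queueing the urgent work, kick the migration
     * thread out of its rate-limiting wait. */
    migration_make_urgent_request();
}

static void service_urgent_work(void)
{
    /* Consumer side: each urgent event that gets serviced must eat one
     * count off the semaphore, matching the post made above. */
    migration_consume_urgent_request();
    /* ... perform the urgent work here ... */
}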