author     Felipe Franciosi <felipe@nutanix.com>      2017-05-24 17:10:01 +0100
committer  Juan Quintela <quintela@redhat.com>        2017-05-31 09:39:20 +0200
commit     d693c6f10ff46c661b055288abae11deb6181a61 (patch)
tree       212bcc60192db98f33cc71d3c1eb4abbe040dc74
parent     9884db2814fbf1eb2ed99e02dadf58534d3ecc25 (diff)
migration: set dirty_pages_rate before autoconverge logic
Currently, a "period" in the RAM migration logic is at least a second long and accounts for what happened since the last period (or the beginning of the migration). The dirty_pages_rate counter is calculated at the end of this logic.

If the auto convergence capability is enabled from the start of the migration, it won't be able to use this counter the first time around.

This calculates dirty_pages_rate as soon as a period is deemed over, which allows for it to be used immediately.

Signed-off-by: Felipe Franciosi <felipe@nutanix.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
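To make the reordering concrete, here is a minimal standalone sketch of the period accounting described above. It mirrors the shape of migration_bitmap_sync() after the patch, but PeriodState and end_period() are illustrative stand-ins, not QEMU's actual types:

#include <stdint.h>

/* Hypothetical stand-in for the RAMState fields involved. */
typedef struct {
    int64_t time_last_bitmap_sync;    /* ms timestamp of last period end */
    uint64_t num_dirty_pages_period;  /* pages dirtied during this period */
    int64_t dirty_pages_rate;         /* pages per second, per period */
} PeriodState;

static void end_period(PeriodState *ps, int64_t end_time)
{
    /* A period is over once at least 1000 ms have elapsed. */
    if (end_time > ps->time_last_bitmap_sync + 1000) {
        /* After the patch: compute the rate first, so consumers such
         * as auto-converge can use it within the same period. */
        ps->dirty_pages_rate = ps->num_dirty_pages_period * 1000
            / (end_time - ps->time_last_bitmap_sync);

        /* ... auto-converge and xbzrle bookkeeping would run here ... */

        /* Reset the period counters for the next interval. */
        ps->time_last_bitmap_sync = end_time;
        ps->num_dirty_pages_period = 0;
    }
}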
-rw-r--r--  migration/ram.c  17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 36bf720c27..495ecbe492 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -694,6 +694,10 @@ static void migration_bitmap_sync(RAMState *rs)

     /* more than 1 second = 1000 millisecons */
     if (end_time > rs->time_last_bitmap_sync + 1000) {
+        /* calculate period counters */
+        rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
+            / (end_time - rs->time_last_bitmap_sync);
+
         if (migrate_auto_converge()) {
             /* The following detection logic can be refined later. For now:
                Check to see if the dirtied bytes is 50% more than the approx.
@@ -702,15 +706,14 @@ static void migration_bitmap_sync(RAMState *rs)
                throttling */
             bytes_xfer_now = ram_bytes_transferred();

-            if (rs->dirty_pages_rate &&
-               (rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
+            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
-               (rs->dirty_rate_high_cnt++ >= 2)) {
+                (rs->dirty_rate_high_cnt++ >= 2)) {
                 trace_migration_throttle();
                 rs->dirty_rate_high_cnt = 0;
                 mig_throttle_guest_down();
-             }
-             rs->bytes_xfer_prev = bytes_xfer_now;
+            }
+            rs->bytes_xfer_prev = bytes_xfer_now;
         }

         if (migrate_use_xbzrle()) {
@@ -723,8 +726,8 @@ static void migration_bitmap_sync(RAMState *rs)
             rs->iterations_prev = rs->iterations;
             rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
         }
-        rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
-            / (end_time - rs->time_last_bitmap_sync);
+
+        /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
     }