author     David Hildenbrand <david@redhat.com>           2021-02-16 11:50:39 +0100
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>   2021-05-13 18:21:13 +0100
commit     1a37352277763220739290c689867540ec193d06 (patch)
tree       f1168ab90f5b86c99b0f660424e401f48ab93316 /migration
parent     dab59ce031228066eb95a9c518846fcacfb0dbbf (diff)
migrate/ram: remove "ram_bulk_stage" and "fpo_enabled"
The bulk stage is kind of weird: migration_bitmap_find_dirty() will
indicate a dirty page; however, ram_save_host_page() will never save it,
because migration_bitmap_clear_dirty() detects that the page is not
actually dirty.
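To see the mismatch concretely, here is a toy model (self-contained C with
simplified, made-up helpers; not the actual QEMU code): during the bulk
stage the find routine reports the next page as dirty without consulting
the bitmap, while the clear routine does consult it, so a page whose bit
is already clear gets "found" but never saved:

    /* Toy model of the pre-patch mismatch; simplified, hypothetical
     * helpers, not the actual QEMU code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_PAGES 8

    static bool bitmap[NR_PAGES];      /* dirty bitmap, one flag per page */
    static bool ram_bulk_stage = true; /* pre-patch: migration starts here */

    /* During the bulk stage, the next page is reported dirty without
     * consulting the bitmap (mirrors the removed "next = start + 1"). */
    static unsigned long find_dirty(unsigned long start)
    {
        if (ram_bulk_stage && start > 0) {
            return start + 1;
        }
        while (start < NR_PAGES && !bitmap[start]) {
            start++;
        }
        return start;
    }

    /* Saving a page only proceeds if its bitmap bit really was set. */
    static bool clear_dirty(unsigned long page)
    {
        bool was_dirty = bitmap[page];
        bitmap[page] = false;
        return was_dirty;
    }

    int main(void)
    {
        for (unsigned long i = 0; i < NR_PAGES; i++) {
            bitmap[i] = true;
        }
        bitmap[3] = false;                  /* bit already cleared, e.g. hinted free */

        unsigned long page = find_dirty(2); /* bulk stage: reports page 3 dirty */
        printf("page %lu reported dirty, actually saved: %s\n",
               page, clear_dirty(page) ? "yes" : "no");
        return 0;
    }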
We already fill the bitmap in ram_list_init_bitmaps() with ones, marking
everything dirty - it wasn't always that way, which is why an explicit
first bulk stage used to be necessary.
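For reference, an abridged sketch of that initialization (the loop in
ram_list_init_bitmaps() also does other per-block setup that is omitted
here):

    /* Abridged sketch of ram_list_init_bitmaps(); other setup omitted. */
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long pages = block->max_length >> TARGET_PAGE_BITS;

        block->bmap = bitmap_new(pages);    /* allocate the dirty bitmap... */
        bitmap_set(block->bmap, 0, pages);  /* ...and set every bit: all dirty */
    }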
Let's simplify: make the bitmap the single source of truth. Explicitly
handle the "xbzrle_enabled after first round" case.
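Condensed from the hunks below (the helper names here are made up for
illustration; the real logic lives inline in ram_save_page() and
find_dirty_block()), the new rule is a single flag, flipped exactly once
when the first complete round ends:

    /* Sketch condensed from the diff below; helper names are illustrative. */
    static bool xbzrle_should_encode(RAMState *rs)
    {
        /* ram_save_page(): XBZRLE only after round 1, never in postcopy. */
        return rs->xbzrle_enabled && !migration_in_postcopy();
    }

    static void first_round_finished(RAMState *rs)
    {
        /* find_dirty_block(): enable XBZRLE once the first round completes. */
        if (migrate_use_xbzrle()) {
            rs->xbzrle_enabled = true;
        }
    }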
Regarding XBZRLE (currently handled implicitly via "ram_bulk_stage =
false"), there is a slight change in behavior:
- COLO: When starting, XBZRLE will be disabled (it was implicitly enabled)
  until the first round actually finishes.
- Free page hinting: When starting, XBZRLE will be disabled (it was
  implicitly enabled) until the first round actually finishes.
- Snapshots: When starting, XBZRLE will be disabled. We essentially only
  do a single run, so it will presumably never actually get enabled.
Postcopy seems to indirectly disable it in ram_save_page(), so there
shouldn't really be any change there.
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Juan Quintela <quintela@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Andrey Gruzdev <andrey.gruzdev@virtuozzo.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210216105039.40680-1-david@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Diffstat (limited to 'migration')
-rw-r--r--  migration/ram.c | 78
1 file changed, 17 insertions, 61 deletions
diff --git a/migration/ram.c b/migration/ram.c
index ace8ad431c..bee2756cd3 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -311,10 +311,6 @@ struct RAMState {
     ram_addr_t last_page;
     /* last ram version we have seen */
     uint32_t last_version;
-    /* We are in the first round */
-    bool ram_bulk_stage;
-    /* The free page optimization is enabled */
-    bool fpo_enabled;
     /* How many times we have dirty too many pages */
     int dirty_rate_high_cnt;
     /* these variables are used for bitmap sync */
@@ -330,6 +326,8 @@ struct RAMState {
     uint64_t xbzrle_pages_prev;
     /* Amount of xbzrle encoded bytes since the beginning of the period */
     uint64_t xbzrle_bytes_prev;
+    /* Start using XBZRLE (e.g., after the first round). */
+    bool xbzrle_enabled;
 
     /* compression statistics since the beginning of the period */
     /* amount of count that no free thread to compress data */
@@ -383,15 +381,6 @@ int precopy_notify(PrecopyNotifyReason reason, Error **errp)
     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
 }
 
-void precopy_enable_free_page_optimization(void)
-{
-    if (!ram_state) {
-        return;
-    }
-
-    ram_state->fpo_enabled = true;
-}
-
 uint64_t ram_bytes_remaining(void)
 {
     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
@@ -664,7 +653,7 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
  */
 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
 {
-    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
+    if (!rs->xbzrle_enabled) {
         return;
     }
 
@@ -792,23 +781,12 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
 {
     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
     unsigned long *bitmap = rb->bmap;
-    unsigned long next;
 
     if (ramblock_is_ignored(rb)) {
         return size;
     }
 
-    /*
-     * When the free page optimization is enabled, we need to check the bitmap
-     * to send the non-free pages rather than all the pages in the bulk stage.
-     */
-    if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
-        next = start + 1;
-    } else {
-        next = find_next_bit(bitmap, size, start);
-    }
-
-    return next;
+    return find_next_bit(bitmap, size, start);
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -1185,8 +1163,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
     XBZRLE_cache_lock();
-    if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
-        migrate_use_xbzrle()) {
+    if (rs->xbzrle_enabled && !migration_in_postcopy()) {
         pages = save_xbzrle_page(rs, &p, current_addr, block,
                                  offset, last_stage);
         if (!last_stage) {
@@ -1386,7 +1363,10 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
             /* Flag that we've looped */
             pss->complete_round = true;
-            rs->ram_bulk_stage = false;
+            /* After the first round, enable XBZRLE. */
+            if (migrate_use_xbzrle()) {
+                rs->xbzrle_enabled = true;
+            }
         }
         /* Didn't find anything this time, but try again on the new block */
         *again = true;
@@ -1801,14 +1781,6 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
 
     if (block) {
         /*
-         * As soon as we start servicing pages out of order, then we have
-         * to kill the bulk stage, since the bulk stage assumes
-         * in (migration_bitmap_find_and_reset_dirty) that every page is
-         * dirty, that's no longer true.
-         */
-        rs->ram_bulk_stage = false;
-
-        /*
          * We want the background search to continue from the queued page
          * since the guest is likely to want other pages near to the page
          * it just requested.
@@ -1920,15 +1892,15 @@ static bool save_page_use_compression(RAMState *rs)
     }
 
     /*
-     * If xbzrle is on, stop using the data compression after first
-     * round of migration even if compression is enabled. In theory,
-     * xbzrle can do better than compression.
+     * If xbzrle is enabled (e.g., after first round of migration), stop
+     * using the data compression. In theory, xbzrle can do better than
+     * compression.
      */
-    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
-        return true;
+    if (rs->xbzrle_enabled) {
+        return false;
     }
 
-    return false;
+    return true;
 }
 
 /*
@@ -2235,8 +2207,7 @@ static void ram_state_reset(RAMState *rs)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
     rs->last_version = ram_list.version;
-    rs->ram_bulk_stage = true;
-    rs->fpo_enabled = false;
+    rs->xbzrle_enabled = false;
 }
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
@@ -2720,15 +2691,7 @@ static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
     /* This may not be aligned with current bitmaps. Recalculate. */
     rs->migration_dirty_pages = pages;
 
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_page = 0;
-    rs->last_version = ram_list.version;
-    /*
-     * Disable the bulk stage, otherwise we'll resend the whole RAM no
-     * matter what we have sent.
-     */
-    rs->ram_bulk_stage = false;
+    ram_state_reset(rs);
 
     /* Update RAMState cache of output QEMUFile */
     rs->f = out;
@@ -3345,16 +3308,9 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     }
 }
 
-/*
- * we must set ram_bulk_stage to false, otherwise in
- * migation_bitmap_find_dirty the bitmap will be unused and
- * all the pages in ram cache wil be flushed to the ram of
- * secondary VM.
- */
 static void colo_init_ram_state(void)
 {
     ram_state_init(&ram_state);
-    ram_state->ram_bulk_stage = false;
 }
 
 /*