author     Juan Quintela <quintela@redhat.com>    2017-05-09 16:22:01 +0200
committer  Juan Quintela <quintela@redhat.com>    2017-06-07 10:20:54 +0200
commit     c00e0928321914e693cf7f5838704612531340a7 (patch)
tree       f0ebdad7e85554a753f2347d638dc46c30bc7383 /migration
parent     83c13382e43c4ce157ee26b6d0a6d1db801df78b (diff)
ram: Move ZERO_TARGET_PAGE inside XBZRLE
It was only used by XBZRLE anyway.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
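As a reading aid (not part of the patch), here is a minimal sketch of the XBZRLE state block after this change. The cache, lock and zero_target_page fields and their comments come from the hunks below; encoded_buf and current_buf are inferred from the g_free() calls in ram_migration_cleanup(), and their comments are illustrative.

/* Sketch only: XBZRLE state after the zero page moves into the struct. */
static struct {
    uint8_t *encoded_buf;      /* buffer used for XBZRLE encoding (assumed comment) */
    uint8_t *current_buf;      /* scratch copy of the page being encoded (assumed comment) */
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
} XBZRLE;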
Diffstat (limited to 'migration')
-rw-r--r--  migration/ram.c  |  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 701a1e6b54..ac30e9ea50 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -69,8 +69,6 @@
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
-static uint8_t *ZERO_TARGET_PAGE;
-
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
return buffer_is_zero(p, size);
@@ -86,6 +84,8 @@ static struct {
/* Cache for XBZRLE, Protected by lock. */
PageCache *cache;
QemuMutex lock;
+ /* it will store a page full of zeros */
+ uint8_t *zero_target_page;
} XBZRLE;
/* buffer used for XBZRLE decoding */
@@ -512,7 +512,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
/* We don't care if this fails to allocate a new cache page
* as long as it updated an old one */
- cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
+ cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
rs->bitmap_sync_count);
}
@@ -1456,10 +1456,11 @@ static void ram_migration_cleanup(void *opaque)
cache_fini(XBZRLE.cache);
g_free(XBZRLE.encoded_buf);
g_free(XBZRLE.current_buf);
- g_free(ZERO_TARGET_PAGE);
+ g_free(XBZRLE.zero_target_page);
XBZRLE.cache = NULL;
XBZRLE.encoded_buf = NULL;
XBZRLE.current_buf = NULL;
+ XBZRLE.zero_target_page = NULL;
}
XBZRLE_cache_unlock();
migration_page_queue_free(rs);
@@ -1880,7 +1881,7 @@ static int ram_state_init(RAMState *rs)
if (migrate_use_xbzrle()) {
XBZRLE_cache_lock();
- ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
+ XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE);
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
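
Condensed view of the zero page's lifecycle after this patch, assembled from the three hunks above (surrounding code elided):

/* ram_state_init(): allocate a zeroed target page under the cache lock */
XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE);

/* xbzrle_cache_zero_page(): seed the XBZRLE cache with that page */
cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
             rs->bitmap_sync_count);

/* ram_migration_cleanup(): release it together with the other buffers */
g_free(XBZRLE.zero_target_page);
XBZRLE.zero_target_page = NULL;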