| author | Stefan Hajnoczi <stefanha@redhat.com> | 2014-12-02 11:23:17 +0000 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-06-05 17:10:00 +0200 |
| commit | 20015f72bda7d2f356c43580a5542a659afedf83 (patch) | |
| tree | 144c1e6f4c8a07c382492553165f5daf9c74e069 /include | |
| parent | d114875b9a1c21162f69a12d72f69a22e7bab376 (diff) | |
| download | qemu-20015f72bda7d2f356c43580a5542a659afedf83.zip | |
migration: move dirty bitmap sync to ram_addr.h
The dirty memory bitmap is managed by ram_addr.h and copied to
migration_bitmap[] periodically during live migration.
Move the bitmap sync code to ram_addr.h, where the related code lives.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <1417519399-3166-5-git-send-email-stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
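
The helper added by this patch merges the guest's per-page dirty bits into the migration copy one host `long` at a time and reports how many pages became newly dirty. The standalone sketch below illustrates just that bit manipulation outside of QEMU; the names (`sync_dirty_words()`, `NUM_WORDS`) are made up for the example, and the compiler's `__builtin_popcountl()` stands in for QEMU's `ctpopl()`.

```c
/* Hypothetical, self-contained sketch of the word-at-a-time dirty bitmap
 * sync; not QEMU code. */
#include <stdio.h>
#include <stdint.h>

#define NUM_WORDS 4   /* hypothetical bitmap size: 4 words of pages */

/* OR the source (guest-dirty) bitmap into the destination (migration)
 * bitmap, count bits newly set in dest, then clear src. */
static uint64_t sync_dirty_words(unsigned long *dest, unsigned long *src,
                                 int nr_words)
{
    uint64_t num_dirty = 0;

    for (int k = 0; k < nr_words; k++) {
        if (src[k]) {
            /* bits set in src but not yet in dest */
            unsigned long new_dirty = ~dest[k] & src[k];
            dest[k] |= src[k];
            num_dirty += __builtin_popcountl(new_dirty);
            src[k] = 0;   /* dirty info has been consumed */
        }
    }
    return num_dirty;
}

int main(void)
{
    unsigned long src[NUM_WORDS] = { 0 };
    unsigned long dest[NUM_WORDS] = { 0 };

    src[0] = 0x5UL;       /* two pages dirtied by the guest */
    src[2] = 1UL << 7;    /* one more page in a later word */
    printf("newly dirty: %llu\n",
           (unsigned long long)sync_dirty_words(dest, src, NUM_WORDS)); /* 3 */

    src[0] = 0x5UL;       /* same pages dirtied again, already in dest */
    printf("newly dirty: %llu\n",
           (unsigned long long)sync_dirty_words(dest, src, NUM_WORDS)); /* 0 */
    return 0;
}
```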
Diffstat (limited to 'include')
-rw-r--r-- | include/exec/ram_addr.h | 44
1 file changed, 44 insertions(+), 0 deletions(-)
```diff
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9f73076044..63db371850 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -218,5 +218,49 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                      unsigned client);
 
+static inline
+uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
+                                               ram_addr_t start,
+                                               ram_addr_t length)
+{
+    ram_addr_t addr;
+    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+    uint64_t num_dirty = 0;
+
+    /* start address is aligned at the start of a word? */
+    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+        int k;
+        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+
+        for (k = page; k < page + nr; k++) {
+            if (src[k]) {
+                unsigned long new_dirty;
+                new_dirty = ~dest[k];
+                dest[k] |= src[k];
+                new_dirty &= src[k];
+                num_dirty += ctpopl(new_dirty);
+                src[k] = 0;
+            }
+        }
+    } else {
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            if (cpu_physical_memory_get_dirty(start + addr,
+                                              TARGET_PAGE_SIZE,
+                                              DIRTY_MEMORY_MIGRATION)) {
+                long k = (start + addr) >> TARGET_PAGE_BITS;
+                if (!test_and_set_bit(k, dest)) {
+                    num_dirty++;
+                }
+                cpu_physical_memory_reset_dirty(start + addr,
+                                                TARGET_PAGE_SIZE,
+                                                DIRTY_MEMORY_MIGRATION);
+            }
+        }
+    }
+
+    return num_dirty;
+}
+
 #endif
 #endif
```
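
The fast path above is only taken when `start` lands on a bitmap-word boundary, i.e. on a multiple of `BITS_PER_LONG` pages; otherwise the helper walks the range page by page with `test_and_set_bit()`. The self-contained sketch below mimics both the alignment test and the slow-path primitive using hypothetical stand-ins (`PAGE_BITS`, `start_is_word_aligned()`, `test_and_set_bit_ul()`); it is illustrative only, not QEMU code.

```c
/* Hypothetical sketch of the alignment check and the per-page fallback. */
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

#define PAGE_BITS 12   /* assume 4 KiB target pages */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Mirrors ((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start */
static bool start_is_word_aligned(unsigned long start)
{
    unsigned long page = (start >> PAGE_BITS) / BITS_PER_LONG;  /* BIT_WORD() */
    return ((page * BITS_PER_LONG) << PAGE_BITS) == start;
}

/* Slow-path primitive: set bit `nr` in `bitmap`, return its previous value. */
static bool test_and_set_bit_ul(unsigned long nr, unsigned long *bitmap)
{
    unsigned long mask = 1UL << (nr % BITS_PER_LONG);
    unsigned long old = bitmap[nr / BITS_PER_LONG];
    bitmap[nr / BITS_PER_LONG] = old | mask;
    return old & mask;
}

int main(void)
{
    unsigned long dest[2] = { 0 };

    /* 0 is word aligned; 0x40000 (64 pages of 4 KiB) is too; 0x1000 is not. */
    printf("0x00000 aligned: %d\n", start_is_word_aligned(0x00000));
    printf("0x40000 aligned: %d\n", start_is_word_aligned(0x40000));
    printf("0x01000 aligned: %d\n", start_is_word_aligned(0x01000));

    /* Per-page fallback: count a page only the first time it is set. */
    printf("page 5 newly dirty: %d\n", !test_and_set_bit_ul(5, dest)); /* 1 */
    printf("page 5 newly dirty: %d\n", !test_and_set_bit_ul(5, dest)); /* 0 */
    return 0;
}
```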