author     Peter Maydell <peter.maydell@linaro.org>   2018-06-04 12:54:00 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2018-06-04 12:54:00 +0100
commit     b74588a493c12c8d389f08004318c7d01ebfda70 (patch)
tree       4180c046efc7ed3a5e33275aeb6e54c3420f6097
parent     163670542fa3b33dbf0332b1e5d47c8d16393d42 (diff)
parent     c5e76115ccb4979cec795a8ae38becd07c2fde9f (diff)
Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20180604' into staging
migration/next for 20180604
# gpg: Signature made Mon 04 Jun 2018 05:14:24 BST
# gpg: using RSA key F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>"
# gpg: aka "Juan Quintela <quintela@trasno.org>"
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723
* remotes/juanquintela/tags/migration/20180604:
migration: not wait RDMA_CM_EVENT_DISCONNECTED event after rdma_disconnect
migration: remove unnecessary variables len in QIOChannelRDMA
migration: Don't activate block devices if using -S
migration: discard non-migratable RAMBlocks
migration: introduce decompress-error-check
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
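
Of the patches above, two are visible at the management level: the late-block-activate
capability added to qapi/migration.json (used by "migration: Don't activate block devices
if using -S") and the decompress-error-check property (see the note after the diffstat).
As a rough usage sketch, not part of this commit, the capability would be enabled on the
destination via the existing migrate-set-capabilities QMP command before migration starts;
the exact invocation below is illustrative only:

    -> { "execute": "migrate-set-capabilities",
         "arguments": { "capabilities": [
           { "capability": "late-block-activate", "state": true } ] } }
    <- { "return": {} }

With the capability set and the destination started with -S, block devices (and their
image locks) are only activated once the VM is actually resumed with "cont".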
-rw-r--r--   exec.c                       38
-rw-r--r--   hw/arm/virt.c                 4
-rw-r--r--   hw/i386/pc_piix.c             1
-rw-r--r--   hw/i386/pc_q35.c              1
-rw-r--r--   include/exec/cpu-common.h     4
-rw-r--r--   include/hw/compat.h           7
-rw-r--r--   migration/migration.c        38
-rw-r--r--   migration/migration.h         7
-rw-r--r--   migration/postcopy-ram.c     12
-rw-r--r--   migration/ram.c              48
-rw-r--r--   migration/rdma.c             27
-rw-r--r--   migration/savevm.c            2
-rw-r--r--   migration/trace-events        1
-rw-r--r--   qapi/migration.json           6
14 files changed, 149 insertions(+), 47 deletions(-)
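
One behavioural note on decompress-error-check before the diff: the property defaults to
"on" (DEFINE_PROP_BOOL ... true), so new machine types abort an incoming migration when a
compressed page fails to decompress, while the HW_COMPAT_2_12 entry below turns it off for
2.12-and-older machine types so they keep accepting streams from older QEMUs that can
trigger spurious decompression errors. A hedged sketch of overriding the default by hand
on the destination, assuming the usual -global spelling for migration properties (the
command line below is illustrative and not part of this commit):

    qemu-system-x86_64 -M pc-q35-3.0 \
        -global migration.decompress-error-check=off \
        -incoming tcp:0:4444 ...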
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -104,6 +104,9 @@ static MemoryRegion io_mem_unassigned;
  * (Set during postcopy)
  */
 #define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
 #endif
 
 #ifdef TARGET_PAGE_BITS_VARY
@@ -1839,6 +1842,21 @@ void qemu_ram_set_uf_zeroable(RAMBlock *rb)
     rb->flags |= RAM_UF_ZEROPAGE;
 }
 
+bool qemu_ram_is_migratable(RAMBlock *rb)
+{
+    return rb->flags & RAM_MIGRATABLE;
+}
+
+void qemu_ram_set_migratable(RAMBlock *rb)
+{
+    rb->flags |= RAM_MIGRATABLE;
+}
+
+void qemu_ram_unset_migratable(RAMBlock *rb)
+{
+    rb->flags &= ~RAM_MIGRATABLE;
+}
+
 /* Called with iothread lock held.  */
 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
 {
@@ -3894,6 +3912,26 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     return ret;
 }
 
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
+{
+    RAMBlock *block;
+    int ret = 0;
+
+    rcu_read_lock();
+    RAMBLOCK_FOREACH(block) {
+        if (!qemu_ram_is_migratable(block)) {
+            continue;
+        }
+        ret = func(block->idstr, block->host, block->offset,
+                   block->used_length, opaque);
+        if (ret) {
+            break;
+        }
+    }
+    rcu_read_unlock();
+    return ret;
+}
+
 /*
  * Unmap pages of memory from start to start+length such that
  * they a) read as 0, b) Trigger whatever fault mechanism
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 3aa19b2935..f0a4fa004c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1692,6 +1692,9 @@ static void machvirt_machine_init(void)
 }
 type_init(machvirt_machine_init);
 
+#define VIRT_COMPAT_2_12 \
+    HW_COMPAT_2_12
+
 static void virt_2_12_instance_init(Object *obj)
 {
     VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1762,6 +1765,7 @@ static void virt_2_12_instance_init(Object *obj)
 
 static void virt_machine_2_12_options(MachineClass *mc)
 {
+    SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_12);
 }
 DEFINE_VIRT_MACHINE_AS_LATEST(2, 12)
 
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index b4c5b03274..3d81136065 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -430,6 +430,7 @@ static void pc_i440fx_3_0_machine_options(MachineClass *m)
     pc_i440fx_machine_options(m);
     m->alias = "pc";
     m->is_default = 1;
+    SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
 }
 
 DEFINE_I440FX_MACHINE(v3_0, "pc-i440fx-3.0", NULL,
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 83d6d75efa..b60cbb9266 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -312,6 +312,7 @@ static void pc_q35_3_0_machine_options(MachineClass *m)
 {
     pc_q35_machine_options(m);
     m->alias = "q35";
+    SET_MACHINE_COMPAT(m, PC_COMPAT_2_12);
 }
 
 DEFINE_Q35_MACHINE(v3_0, "pc-q35-3.0", NULL,
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 24d335f95d..0b58e262f3 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -75,6 +75,9 @@ const char *qemu_ram_get_idstr(RAMBlock *rb);
 bool qemu_ram_is_shared(RAMBlock *rb);
 bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
 void qemu_ram_set_uf_zeroable(RAMBlock *rb);
+bool qemu_ram_is_migratable(RAMBlock *rb);
+void qemu_ram_set_migratable(RAMBlock *rb);
+void qemu_ram_unset_migratable(RAMBlock *rb);
 size_t qemu_ram_pagesize(RAMBlock *block);
 size_t qemu_ram_pagesize_largest(void);
 
@@ -119,6 +122,7 @@ typedef int (RAMBlockIterFunc)(const char *block_name, void *host_addr,
     ram_addr_t offset, ram_addr_t length, void *opaque);
 
 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
 
 #endif
diff --git a/include/hw/compat.h b/include/hw/compat.h
index 4681c2719a..563908b874 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -1,7 +1,12 @@
 #ifndef HW_COMPAT_H
 #define HW_COMPAT_H
 
-#define HW_COMPAT_2_12
+#define HW_COMPAT_2_12 \
+    {\
+        .driver   = "migration",\
+        .property = "decompress-error-check",\
+        .value    = "off",\
+    },
 
 #define HW_COMPAT_2_11 \
     {\
diff --git a/migration/migration.c b/migration/migration.c
index 05aec2c905..1e99ec9b7e 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -202,6 +202,16 @@ static void migrate_generate_event(int new_state)
     }
 }
 
+static bool migrate_late_block_activate(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[
+        MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
+}
+
 /*
  * Called on -incoming with a defer: uri.
  * The migration can be started later after any parameters have been
@@ -311,13 +321,23 @@ static void process_incoming_migration_bh(void *opaque)
     Error *local_err = NULL;
     MigrationIncomingState *mis = opaque;
 
-    /* Make sure all file formats flush their mutable metadata.
-     * If we get an error here, just don't restart the VM yet. */
-    bdrv_invalidate_cache_all(&local_err);
-    if (local_err) {
-        error_report_err(local_err);
-        local_err = NULL;
-        autostart = false;
+    /* If capability late_block_activate is set:
+     * Only fire up the block code now if we're going to restart the
+     * VM, else 'cont' will do it.
+     * This causes file locking to happen; so we don't want it to happen
+     * unless we really are starting the VM.
+     */
+    if (!migrate_late_block_activate() ||
+         (autostart && (!global_state_received() ||
+            global_state_get_runstate() == RUN_STATE_RUNNING))) {
+        /* Make sure all file formats flush their mutable metadata.
+         * If we get an error here, just don't restart the VM yet. */
+        bdrv_invalidate_cache_all(&local_err);
+        if (local_err) {
+            error_report_err(local_err);
+            local_err = NULL;
+            autostart = false;
+        }
     }
 
     /*
@@ -2971,6 +2991,8 @@ void migration_global_dump(Monitor *mon)
                    ms->send_configuration ? "on" : "off");
     monitor_printf(mon, "send-section-footer: %s\n",
                    ms->send_section_footer ? "on" : "off");
+    monitor_printf(mon, "decompress-error-check: %s\n",
+                   ms->decompress_error_check ? "on" : "off");
 }
 
 #define DEFINE_PROP_MIG_CAP(name, x) \
@@ -2984,6 +3006,8 @@ static Property migration_properties[] = {
                      send_configuration, true),
     DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                      send_section_footer, true),
+    DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
+                      decompress_error_check, true),
 
     /* Migration parameters */
     DEFINE_PROP_UINT8("x-compress-level", MigrationState,
diff --git a/migration/migration.h b/migration/migration.h
index 8f0c82159b..5af57d616c 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -212,6 +212,13 @@ struct MigrationState
     /* Needed by postcopy-pause state */
     QemuSemaphore postcopy_pause_sem;
     QemuSemaphore postcopy_pause_rp_sem;
+    /*
+     * Whether we abort the migration if decompression errors are
+     * detected at the destination. It is left at false for qemu
+     * older than 3.0, since only newer qemu sends streams that
+     * do not trigger spurious decompression errors.
+     */
+    bool decompress_error_check;
 };
 
 void migrate_set_state(int *state, int old_state, int new_state);
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 658b750a8e..48e51556a7 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -374,7 +374,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
     }
 
     /* We don't support postcopy with shared RAM yet */
-    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
+    if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
         goto out;
     }
 
@@ -502,7 +502,7 @@ static int cleanup_range(const char *block_name, void *host_addr,
  */
 int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
 {
-    if (qemu_ram_foreach_block(init_range, NULL)) {
+    if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
         return -1;
     }
 
@@ -524,7 +524,7 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
             return -1;
         }
 
-        if (qemu_ram_foreach_block(cleanup_range, mis)) {
+        if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
             return -1;
         }
         /* Let the fault thread quit */
@@ -593,7 +593,7 @@ static int nhp_range(const char *block_name, void *host_addr,
  */
 int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 {
-    if (qemu_ram_foreach_block(nhp_range, mis)) {
+    if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
         return -1;
     }
 
@@ -604,7 +604,7 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 
 /*
  * Mark the given area of RAM as requiring notification to unwritten areas
- * Used as a callback on qemu_ram_foreach_block.
+ * Used as a callback on qemu_ram_foreach_migratable_block.
  * host_addr: Base of area to mark
  * offset: Offset in the whole ram arena
  * length: Length of the section
@@ -1099,7 +1099,7 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
     mis->have_fault_thread = true;
 
     /* Mark so that we get notified of accesses to unwritten areas */
-    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
+    if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
         return -1;
     }
 
diff --git a/migration/ram.c b/migration/ram.c
index f4e29cadc6..a500015a2f 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -157,11 +157,16 @@ out:
     return ret;
 }
 
+/* Should be holding either ram_list.mutex, or the RCU lock. */
+#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
+    RAMBLOCK_FOREACH(block) \
+        if (!qemu_ram_is_migratable(block)) {} else
+
 static void ramblock_recv_map_init(void)
 {
     RAMBlock *rb;
 
-    RAMBLOCK_FOREACH(rb) {
+    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
         assert(!rb->receivedmap);
         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
     }
@@ -1078,6 +1083,10 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
     unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
+    if (!qemu_ram_is_migratable(rb)) {
+        return size;
+    }
+
     if (rs->ram_bulk_stage && start > 0) {
         next = start + 1;
     } else {
@@ -1123,7 +1132,7 @@ uint64_t ram_pagesize_summary(void)
     RAMBlock *block;
     uint64_t summary = 0;
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         summary |= block->page_size;
     }
 
@@ -1147,7 +1156,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
     qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         migration_bitmap_sync_range(rs, block, 0, block->used_length);
     }
     rcu_read_unlock();
@@ -1786,6 +1795,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
+    if (!qemu_ram_is_migratable(pss->block)) {
+        error_report("block %s should not be migrated !", pss->block->idstr);
+        return 0;
+    }
+
     do {
         /* Check the pages is dirty and if it is send it */
         if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
@@ -1884,7 +1898,7 @@ uint64_t ram_bytes_total(void)
     uint64_t total = 0;
 
     rcu_read_lock();
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         total += block->used_length;
     }
     rcu_read_unlock();
@@ -1939,7 +1953,7 @@ static void ram_save_cleanup(void *opaque)
      */
     memory_global_dirty_log_stop();
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         g_free(block->bmap);
         block->bmap = NULL;
         g_free(block->unsentmap);
@@ -2002,7 +2016,7 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
     struct RAMBlock *block;
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         unsigned long *bitmap = block->bmap;
         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
@@ -2080,7 +2094,7 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     struct RAMBlock *block;
     int ret;
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         PostcopyDiscardState *pds =
             postcopy_discard_send_init(ms, block->idstr);
 
@@ -2288,7 +2302,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
         unsigned long *bitmap = block->bmap;
         unsigned long *unsentmap = block->unsentmap;
@@ -2447,7 +2461,7 @@ static void ram_list_init_bitmaps(void)
 
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        RAMBLOCK_FOREACH_MIGRATABLE(block) {
             pages = block->max_length >> TARGET_PAGE_BITS;
             block->bmap = bitmap_new(pages);
             bitmap_set(block->bmap, 0, pages);
@@ -2563,7 +2577,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 
     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
         qemu_put_be64(f, block->used_length);
@@ -2807,6 +2821,11 @@ static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
         return NULL;
     }
 
+    if (!qemu_ram_is_migratable(block)) {
+        error_report("block %s should not be migrated !", id);
+        return NULL;
+    }
+
     return block;
 }
 
@@ -2881,7 +2900,7 @@ static void *do_data_decompress(void *opaque)
 
             ret = qemu_uncompress_data(&param->stream, des, pagesize,
                                        param->compbuf, len);
-            if (ret < 0) {
+            if (ret < 0 && migrate_get_current()->decompress_error_check) {
                 error_report("decompress data failed");
                 qemu_file_set_error(decomp_file, ret);
             }
@@ -3049,7 +3068,7 @@ static int ram_load_cleanup(void *opaque)
     xbzrle_load_cleanup();
     compress_threads_load_cleanup();
 
-    RAMBLOCK_FOREACH(rb) {
+    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
         g_free(rb->receivedmap);
         rb->receivedmap = NULL;
     }
@@ -3311,7 +3330,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             length = qemu_get_be64(f);
 
             block = qemu_ram_block_by_name(id);
-            if (block) {
+            if (block && !qemu_ram_is_migratable(block)) {
+                error_report("block %s should not be migrated !", id);
+                ret = -EINVAL;
+            } else if (block) {
                 if (length != block->used_length) {
                     Error *local_err = NULL;
 
diff --git a/migration/rdma.c b/migration/rdma.c
index 7d233b0820..05aee3d591 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -400,7 +400,6 @@ struct QIOChannelRDMA {
     QIOChannel parent;
     RDMAContext *rdma;
     QEMUFile *file;
-    size_t len;
     bool blocking; /* XXX we don't actually honour this yet */
 };
 
@@ -2268,8 +2267,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
 
 static void qemu_rdma_cleanup(RDMAContext *rdma)
 {
-    struct rdma_cm_event *cm_event;
-    int ret, idx;
+    int idx;
 
     if (rdma->cm_id && rdma->connected) {
         if ((rdma->error_state ||
@@ -2283,14 +2281,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
             qemu_rdma_post_send_control(rdma, NULL, &head);
         }
 
-        ret = rdma_disconnect(rdma->cm_id);
-        if (!ret) {
-            trace_qemu_rdma_cleanup_waiting_for_disconnect();
-            ret = rdma_get_cm_event(rdma->channel, &cm_event);
-            if (!ret) {
-                rdma_ack_cm_event(cm_event);
-            }
-        }
+        rdma_disconnect(rdma->cm_id);
         trace_qemu_rdma_cleanup_disconnect();
         rdma->connected = false;
     }
@@ -2608,6 +2599,7 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
     int ret;
     ssize_t done = 0;
     size_t i;
+    size_t len = 0;
 
     CHECK_ERROR_STATE();
 
@@ -2627,10 +2619,10 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
         while (remaining) {
             RDMAControlHeader head;
 
-            rioc->len = MIN(remaining, RDMA_SEND_INCREMENT);
-            remaining -= rioc->len;
+            len = MIN(remaining, RDMA_SEND_INCREMENT);
+            remaining -= len;
 
-            head.len = rioc->len;
+            head.len = len;
             head.type = RDMA_CONTROL_QEMU_FILE;
 
             ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
@@ -2640,8 +2632,8 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
                 return ret;
             }
 
-            data += rioc->len;
-            done += rioc->len;
+            data += len;
+            done += len;
         }
     }
 
@@ -2736,8 +2728,7 @@ static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
             }
         }
     }
-    rioc->len = done;
-    return rioc->len;
+    return done;
 }
 
 /*
diff --git a/migration/savevm.c b/migration/savevm.c
index da724c52f2..c2f34ffc7c 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2689,11 +2689,13 @@ void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
 {
     qemu_ram_set_idstr(mr->ram_block,
                        memory_region_name(mr), dev);
+    qemu_ram_set_migratable(mr->ram_block);
 }
 
 void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
 {
     qemu_ram_unset_idstr(mr->ram_block);
+    qemu_ram_unset_migratable(mr->ram_block);
 }
 
 void vmstate_register_ram_global(MemoryRegion *mr)
diff --git a/migration/trace-events b/migration/trace-events
index 3c798ddd11..4a768eaaeb 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -146,7 +146,6 @@ qemu_rdma_accept_pin_state(bool pin) "%d"
 qemu_rdma_accept_pin_verbsc(void *verbs) "Verbs context after listen: %p"
 qemu_rdma_block_for_wrid_miss(const char *wcompstr, int wcomp, const char *gcompstr, uint64_t req) "A Wanted wrid %s (%d) but got %s (%" PRIu64 ")"
 qemu_rdma_cleanup_disconnect(void) ""
-qemu_rdma_cleanup_waiting_for_disconnect(void) ""
 qemu_rdma_close(void) ""
 qemu_rdma_connect_pin_all_requested(void) ""
 qemu_rdma_connect_pin_all_outcome(bool pin) "%d"
diff --git a/qapi/migration.json b/qapi/migration.json
index dc9cc85545..f7e10ee90f 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -376,13 +376,17 @@
 # @postcopy-blocktime: Calculate downtime for postcopy live migration
 #                      (since 3.0)
 #
+# @late-block-activate: If enabled, the destination will not activate block
+#           devices (and thus take locks) immediately at the end of migration.
+#           (since 3.0)
+#
 # Since: 1.2
 ##
 { 'enum': 'MigrationCapability',
   'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
            'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram',
            'block', 'return-path', 'pause-before-switchover', 'x-multifd',
-           'dirty-bitmaps', 'postcopy-blocktime' ] }
+           'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate' ] }
 
 ##
 # @MigrationCapabilityStatus:
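
A note on the RAMBLOCK_FOREACH_MIGRATABLE macro added in migration/ram.c: it filters the
existing RAMBLOCK_FOREACH loop with "if (!qemu_ram_is_migratable(block)) {} else" rather
than a plain "if (...)", so the loop body supplied by the caller becomes the else branch
and a later "else" written by the caller cannot pair with the macro's if. A minimal
stand-alone illustration of the same idiom (simplified list type and a hypothetical
"wanted" flag, not the QEMU code itself):

    /* sketch: dangling-else-safe filtering foreach, as in RAMBLOCK_FOREACH_MIGRATABLE */
    #include <stdio.h>

    struct item { const char *name; int wanted; struct item *next; };

    #define FOREACH(it, head)        for ((it) = (head); (it); (it) = (it)->next)
    /* The empty "{} else" swallows unwanted items and makes the caller's
     * statement the else branch, so no open "if" is left dangling. */
    #define FOREACH_WANTED(it, head) FOREACH(it, head) if (!(it)->wanted) {} else

    int main(void)
    {
        struct item c = { "c", 1, NULL }, b = { "b", 0, &c }, a = { "a", 1, &b };
        struct item *it;

        FOREACH_WANTED(it, &a) {
            printf("%s\n", it->name);   /* prints "a" and "c"; "b" is skipped */
        }
        return 0;
    }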