author    | Raphael Norwitz <raphael.norwitz@nutanix.com> | 2020-05-21 05:00:56 +0000
committer | Michael S. Tsirkin <mst@redhat.com>           | 2020-06-12 10:17:06 -0400
commit    | 875b9fd97b34dae796d61330804b094ca7b1b403 (patch)
tree      | 950c80fe4041c2c7a1c10161a205284a787aa092 /contrib/libvhost-user/libvhost-user.c
parent    | ec94c8e621de96c50c2d381c8c9ec94f5beec7c1 (diff)
Support individual region unmap in libvhost-user
When the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS protocol feature is
enabled, on memory hot-unplug QEMU will transmit the memory regions to
remove individually, using the new VHOST_USER_REM_MEM_REG message. With
this change, vhost-user backends built with libvhost-user can unmap
individual memory regions when they receive a VHOST_USER_REM_MEM_REG
message.
Since QEMU only sends VHOST_USER_REM_MEM_REG messages when the
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS feature has been negotiated,
and support for that feature has not yet been added to libvhost-user,
this new functionality is not yet used.
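
For readers unfamiliar with the message flow: the VHOST_USER_REM_MEM_REG
payload carries the guest physical address, userspace address and size of
the region being unplugged, and the backend drops the one tracked region
whose fields all match. Below is a minimal, self-contained sketch of that
match-and-compact pattern. The struct names, MAX_REGIONS and
remove_region() are simplified stand-ins for illustration only, not
libvhost-user identifiers; the real handler, vu_rem_mem_reg(), is in the
diff further down.

/*
 * Illustrative sketch only: simplified stand-in types mirroring the
 * fields the patch compares (gpa/guest_phys_addr, qva/userspace_addr,
 * size/memory_size). Not the libvhost-user API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_REGIONS 8   /* stand-in for VHOST_MEMORY_MAX_NREGIONS */

typedef struct {
    uint64_t gpa;   /* guest physical address */
    uint64_t qva;   /* QEMU virtual address   */
    uint64_t size;  /* region length in bytes */
} Region;

typedef struct {
    Region regions[MAX_REGIONS];
    size_t nregions;
} Backend;

/* A region matches when all three identifying fields are equal. */
static bool region_matches(const Region *have, const Region *want)
{
    return have->gpa == want->gpa &&
           have->qva == want->qva &&
           have->size == want->size;
}

/*
 * Copy every non-matching region into a shadow array, then copy the
 * shadow array back -- the same compaction pattern the patch uses,
 * minus the munmap() of the dropped mapping.
 */
static bool remove_region(Backend *b, const Region *want)
{
    Region shadow[MAX_REGIONS] = {0};
    size_t j = 0;
    bool found = false;

    for (size_t i = 0; i < b->nregions; i++) {
        if (region_matches(&b->regions[i], want)) {
            found = true;           /* the real code munmap()s here */
        } else {
            shadow[j++] = b->regions[i];
        }
    }

    if (found) {
        memcpy(b->regions, shadow, sizeof(shadow));
        b->nregions = j;
    }
    return found;
}

int main(void)
{
    Backend b = {
        .regions = {
            { .gpa = 0x0,        .qva = 0x7f0000000000, .size = 0x40000000 },
            { .gpa = 0x40000000, .qva = 0x7f0040000000, .size = 0x40000000 },
        },
        .nregions = 2,
    };
    Region unplugged = { .gpa = 0x40000000, .qva = 0x7f0040000000,
                         .size = 0x40000000 };

    printf("removed: %d, regions left: %zu\n",
           remove_region(&b, &unplugged), b.nregions);
    return 0;
}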
Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <1588533678-23450-10-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'contrib/libvhost-user/libvhost-user.c')
-rw-r--r-- | contrib/libvhost-user/libvhost-user.c | 63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index d8ee7a23a3..386449b697 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -139,6 +139,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_VRING_KICK),
         REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
         REQ(VHOST_USER_ADD_MEM_REG),
+        REQ(VHOST_USER_REM_MEM_REG),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -763,6 +764,66 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     }
 }
 
+static inline bool reg_equal(VuDevRegion *vudev_reg,
+                             VhostUserMemoryRegion *msg_reg)
+{
+    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
+        vudev_reg->qva == msg_reg->userspace_addr &&
+        vudev_reg->size == msg_reg->memory_size) {
+        return true;
+    }
+
+    return false;
+}
+
+static bool
+vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
+    int i, j;
+    bool found = false;
+    VuDevRegion shadow_regions[VHOST_MEMORY_MAX_NREGIONS] = {};
+    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
+
+    DPRINT("Removing region:\n");
+    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
+           msg_region->guest_phys_addr);
+    DPRINT("    memory_size:     0x%016"PRIx64"\n",
+           msg_region->memory_size);
+    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
+           msg_region->userspace_addr);
+    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
+           msg_region->mmap_offset);
+
+    for (i = 0, j = 0; i < dev->nregions; i++) {
+        if (!reg_equal(&dev->regions[i], msg_region)) {
+            shadow_regions[j].gpa = dev->regions[i].gpa;
+            shadow_regions[j].size = dev->regions[i].size;
+            shadow_regions[j].qva = dev->regions[i].qva;
+            shadow_regions[j].mmap_offset = dev->regions[i].mmap_offset;
+            j++;
+        } else {
+            found = true;
+            VuDevRegion *r = &dev->regions[i];
+            void *m = (void *) (uintptr_t) r->mmap_addr;
+
+            if (m) {
+                munmap(m, r->size + r->mmap_offset);
+            }
+        }
+    }
+
+    if (found) {
+        memcpy(dev->regions, shadow_regions,
+               sizeof(VuDevRegion) * VHOST_MEMORY_MAX_NREGIONS);
+        DPRINT("Successfully removed a region\n");
+        dev->nregions--;
+        vmsg_set_reply_u64(vmsg, 0);
+    } else {
+        vu_panic(dev, "Specified region not found\n");
+    }
+
+    return true;
+}
+
 static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1771,6 +1832,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_handle_get_max_memslots(dev, vmsg);
     case VHOST_USER_ADD_MEM_REG:
         return vu_add_mem_reg(dev, vmsg);
+    case VHOST_USER_REM_MEM_REG:
+        return vu_rem_mem_reg(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
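
Two details of the handler are worth noting. First, rather than shifting
entries in place, vu_rem_mem_reg() rebuilds the region list in a
shadow_regions array of VHOST_MEMORY_MAX_NREGIONS entries and memcpy()s
the whole array back, which keeps the removal loop simple at the cost of
copying unchanged entries. Second, the matched region is unmapped with a
length of size + mmap_offset, which appears to match the full mapping
length used when the region was mapped (the mmap offset is included in
the original mapping), so the entire mapping is released in one call.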