author    | Raphael Norwitz <raphael.norwitz@nutanix.com> | 2020-05-21 05:00:52 +0000
committer | Michael S. Tsirkin <mst@redhat.com>           | 2020-06-12 10:17:06 -0400
commit    | ec94c8e621de96c50c2d381c8c9ec94f5beec7c1
tree      | 3600da2ea82557244b2183a6dd93831e7cda6009 /contrib/libvhost-user/libvhost-user.c
parent    | 6fb2e173d20c9bbb5466183d33a3ad7dcd0375fa
Support adding individual regions in libvhost-user
When the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS protocol feature is
enabled, QEMU transmits memory regions to a backend individually, using
the new VHOST_USER_ADD_MEM_REG message. With this change, vhost-user
backends built with libvhost-user can map in new memory regions when
VHOST_USER_ADD_MEM_REG messages are received.

QEMU only sends VHOST_USER_ADD_MEM_REG messages when the
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS feature has been negotiated,
and since libvhost-user does not yet advertise that feature, this new
functionality is not yet exercised.
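
For orientation, here is a minimal sketch of the per-region descriptor each
VHOST_USER_ADD_MEM_REG message carries (one mmap'able file descriptor
accompanies it). The field names match what the handler below reads from
vmsg->payload.memreg.region; treat this as an illustration and consult the
vhost-user specification for the authoritative wire format.

    /*
     * Illustrative sketch of the per-region payload of
     * VHOST_USER_ADD_MEM_REG; field names follow the diff below.
     */
    #include <stdint.h>

    typedef struct VhostUserMemoryRegion {
        uint64_t guest_phys_addr; /* guest physical address of the region */
        uint64_t memory_size;     /* region size in bytes */
        uint64_t userspace_addr;  /* QEMU's virtual address for the region */
        uint64_t mmap_offset;     /* offset into the accompanying fd */
    } VhostUserMemoryRegion;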
Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <1588533678-23450-9-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'contrib/libvhost-user/libvhost-user.c')
-rw-r--r-- | contrib/libvhost-user/libvhost-user.c | 103
1 file changed, 103 insertions(+), 0 deletions(-)
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 9f039b707e..d8ee7a23a3 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -138,6 +138,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_GPU_SET_SOCKET),
         REQ(VHOST_USER_VRING_KICK),
         REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
+        REQ(VHOST_USER_ADD_MEM_REG),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -663,6 +664,106 @@ generate_faults(VuDev *dev) {
 }
 
 static bool
+vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
+    int i;
+    bool track_ramblocks = dev->postcopy_listening;
+    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
+    VuDevRegion *dev_region = &dev->regions[dev->nregions];
+    void *mmap_addr;
+
+    /*
+     * If we are in postcopy mode and we receive a u64 payload with a 0 value
+     * we know all the postcopy client bases have been received, and we
+     * should start generating faults.
+     */
+    if (track_ramblocks &&
+        vmsg->size == sizeof(vmsg->payload.u64) &&
+        vmsg->payload.u64 == 0) {
+        (void)generate_faults(dev);
+        return false;
+    }
+
+    DPRINT("Adding region: %d\n", dev->nregions);
+    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
+           msg_region->guest_phys_addr);
+    DPRINT("    memory_size:     0x%016"PRIx64"\n",
+           msg_region->memory_size);
+    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
+           msg_region->userspace_addr);
+    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
+           msg_region->mmap_offset);
+
+    dev_region->gpa = msg_region->guest_phys_addr;
+    dev_region->size = msg_region->memory_size;
+    dev_region->qva = msg_region->userspace_addr;
+    dev_region->mmap_offset = msg_region->mmap_offset;
+
+    /*
+     * We don't use offset argument of mmap() since the
+     * mapped address has to be page aligned, and we use huge
+     * pages.
+     */
+    if (track_ramblocks) {
+        /*
+         * In postcopy we're using PROT_NONE here to catch anyone
+         * accessing it before we userfault.
+         */
+        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+                         PROT_NONE, MAP_SHARED,
+                         vmsg->fds[0], 0);
+    } else {
+        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+                         PROT_READ | PROT_WRITE, MAP_SHARED, vmsg->fds[0],
+                         0);
+    }
+
+    if (mmap_addr == MAP_FAILED) {
+        vu_panic(dev, "region mmap error: %s", strerror(errno));
+    } else {
+        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
+               dev_region->mmap_addr);
+    }
+
+    close(vmsg->fds[0]);
+
+    if (track_ramblocks) {
+        /*
+         * Return the address to QEMU so that it can translate the ufd
+         * fault addresses back.
+         */
+        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
+                                                 dev_region->mmap_offset);
+
+        /* Send the message back to qemu with the addresses filled in. */
+        vmsg->fd_num = 0;
+        if (!vu_send_reply(dev, dev->sock, vmsg)) {
+            vu_panic(dev, "failed to respond to add-mem-region for postcopy");
+            return false;
+        }
+
+        DPRINT("Successfully added new region in postcopy\n");
+        dev->nregions++;
+        return false;
+
+    } else {
+        for (i = 0; i < dev->max_queues; i++) {
+            if (dev->vq[i].vring.desc) {
+                if (map_ring(dev, &dev->vq[i])) {
+                    vu_panic(dev, "remapping queue %d for new memory region",
+                             i);
+                }
+            }
+        }
+
+        DPRINT("Successfully added new region\n");
+        dev->nregions++;
+        vmsg_set_reply_u64(vmsg, 0);
+        return true;
+    }
+}
+
+static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
     int i;
@@ -1668,6 +1769,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_handle_vring_kick(dev, vmsg);
     case VHOST_USER_GET_MAX_MEM_SLOTS:
         return vu_handle_get_max_memslots(dev, vmsg);
+    case VHOST_USER_ADD_MEM_REG:
+        return vu_add_mem_reg(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
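
The comment about not using mmap()'s offset argument deserves a note: the fd
is always mapped from offset zero (an mmap() offset must be page aligned, and
the region may be backed by huge pages), so the region's first byte lives
mmap_offset bytes into the mapping. Below is a simplified, single-region
sketch of the resulting guest-physical-address-to-pointer translation using
the fields vu_add_mem_reg() stores; libvhost-user's own lookup
(vu_gpa_to_va()) walks all regions and clamps lengths, so this is an
illustration only, and region_gpa_to_va is a hypothetical helper name.

    #include <stdint.h>
    #include "libvhost-user.h" /* VuDevRegion */

    /*
     * Hypothetical helper: translate a guest physical address that falls
     * inside one region into a pointer in the backend's address space.
     */
    static void *
    region_gpa_to_va(VuDevRegion *r, uint64_t gpa)
    {
        if (gpa < r->gpa || gpa - r->gpa >= r->size) {
            return NULL; /* gpa is not covered by this region */
        }
        /*
         * mmap_addr maps the fd from offset 0, so the region's data
         * starts mmap_offset bytes into the mapping.
         */
        return (void *)(uintptr_t)(r->mmap_addr + r->mmap_offset +
                                   (gpa - r->gpa));
    }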
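
On the postcopy path the new region is deliberately mapped PROT_NONE, the
remapped userspace_addr is returned so QEMU can translate userfaultfd fault
addresses back, and only when the final empty u64 payload arrives does
generate_faults() arm the userfault machinery for all regions. Roughly, and
with error handling and madvise() details omitted, arming one region looks
like the sketch below; arm_region_faults is a hypothetical name, and uffd is
assumed to be the userfaultfd received earlier during postcopy setup.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/userfaultfd.h>

    /*
     * Hypothetical sketch: route missing-page faults for one region to a
     * userfaultfd, then lift PROT_NONE so accesses fault into the uffd
     * instead of crashing the backend.
     */
    static int
    arm_region_faults(int uffd, uint64_t mmap_addr, uint64_t size,
                      uint64_t mmap_offset)
    {
        struct uffdio_register reg = {
            .range = {
                .start = mmap_addr,
                .len = size + mmap_offset,
            },
            .mode = UFFDIO_REGISTER_MODE_MISSING,
        };

        if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
            return -1;
        }
        /* Faults are now routed to the uffd; make the pages accessible. */
        return mprotect((void *)(uintptr_t)mmap_addr, size + mmap_offset,
                        PROT_READ | PROT_WRITE);
    }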