author     Dr. David Alan Gilbert <dgilbert@redhat.com>   2019-08-12 17:35:19 +0100
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>   2020-01-23 16:41:37 +0000
commit     49e9ec749d4db62ae51f76354143cee183912a1d (patch)
tree       d401953a743b16c3b3c12851e5ac669a6bb2f542 /contrib/libvhost-user/libvhost-user.c
parent     1222f015558fc34cea02aa3a5a92de608c82cec8 (diff)
download   qemu-49e9ec749d4db62ae51f76354143cee183912a1d.zip
libvhost-user: Fix some memtable remap cases
If a new setmemtable (VHOST_USER_SET_MEM_TABLE) command comes in once the vhost threads are running, it will remap the guest's address space and the threads will now be looking in the wrong place. Fortunately we're running this command under lock, so we can update the queue mappings so that the threads will look in the new, right place.

Note: this doesn't fix things that the threads might be doing without a lock (e.g. a readv/writev!). That's for another time.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
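For background, the "queue mappings" being updated here are local pointers that libvhost-user derives from QEMU-userspace virtual addresses via its region table (qva_to_va() in this file); once a new memory table arrives, those cached pointers must be re-derived against the new mmap. Below is a simplified, self-contained sketch of that idea, not the library's code: Region, Ring, translate() and remap_ring() are hypothetical stand-ins for VuDevRegion, the vring/vra fields, qva_to_va() and the new map_ring().

/* Simplified, hypothetical illustration of the remap idea; not libvhost-user code. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef struct Region {
    uint64_t qva;        /* start of the region in QEMU's virtual address space */
    uint64_t size;       /* length of the region in bytes */
    uint8_t *mmap_addr;  /* where this process currently has the region mapped */
} Region;

typedef struct Ring {
    uint64_t desc_qva;   /* cached guest-supplied address (like vq->vra) */
    void *desc;          /* current local pointer (like vq->vring.desc) */
} Ring;

/* Translate a QEMU virtual address into a local pointer (the qva_to_va() idea). */
static void *translate(const Region *regs, size_t n, uint64_t qva)
{
    for (size_t i = 0; i < n; i++) {
        if (qva >= regs[i].qva && qva < regs[i].qva + regs[i].size) {
            return regs[i].mmap_addr + (qva - regs[i].qva);
        }
    }
    return NULL;  /* address not covered by any region */
}

/* After a new memory table arrives, re-translate the cached ring address;
 * otherwise worker threads keep dereferencing pointers into the old mapping. */
static int remap_ring(Ring *r, const Region *regs, size_t n)
{
    r->desc = translate(regs, n, r->desc_qva);
    return r->desc ? 0 : -1;   /* non-zero means the address no longer maps */
}

int main(void)
{
    uint8_t old_map[4096], new_map[4096];
    Region table = { .qva = 0x100000, .size = sizeof(old_map), .mmap_addr = old_map };
    Ring ring = { .desc_qva = 0x100040 };

    remap_ring(&ring, &table, 1);      /* initial setup, like SET_VRING_ADDR */
    printf("desc before remap: %p\n", ring.desc);

    table.mmap_addr = new_map;         /* a new SET_MEM_TABLE remaps the region */
    remap_ring(&ring, &table, 1);      /* re-translate so threads see the new mapping */
    printf("desc after remap:  %p\n", ring.desc);
    return 0;
}

In the real patch the cached guest addresses live in vq->vra (stored in vu_set_vring_addr_exec below), which is what lets vu_set_mem_table_exec() re-run the translation via map_ring() when a new memory table arrives.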
Diffstat (limited to 'contrib/libvhost-user/libvhost-user.c')
-rw-r--r--  contrib/libvhost-user/libvhost-user.c | 33 +++++++++++++++++++++++++--------
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 63e41062a4..b89bf18501 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -565,6 +565,21 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
+map_ring(VuDev *dev, VuVirtq *vq)
+{
+    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
+    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
+    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
+
+    DPRINT("Setting virtq addresses:\n");
+    DPRINT("    vring_desc at %p\n", vq->vring.desc);
+    DPRINT("    vring_used at %p\n", vq->vring.used);
+    DPRINT("    vring_avail at %p\n", vq->vring.avail);
+
+    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
+}
+
+static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
     int i;
@@ -767,6 +782,14 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
         close(vmsg->fds[i]);
     }
 
+    for (i = 0; i < dev->max_queues; i++) {
+        if (dev->vq[i].vring.desc) {
+            if (map_ring(dev, &dev->vq[i])) {
+                vu_panic(dev, "remaping queue %d during setmemtable", i);
+            }
+        }
+    }
+
     return false;
 }
@@ -853,18 +876,12 @@ vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", vra->avail_user_addr);
DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", vra->log_guest_addr);
+ vq->vra = *vra;
vq->vring.flags = vra->flags;
- vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
- vq->vring.used = qva_to_va(dev, vra->used_user_addr);
- vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
vq->vring.log_guest_addr = vra->log_guest_addr;
- DPRINT("Setting virtq addresses:\n");
- DPRINT(" vring_desc at %p\n", vq->vring.desc);
- DPRINT(" vring_used at %p\n", vq->vring.used);
- DPRINT(" vring_avail at %p\n", vq->vring.avail);
- if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
+ if (map_ring(dev, vq)) {
vu_panic(dev, "Invalid vring_addr message");
return false;
}