author     Cornelia Huck <cornelia.huck@de.ibm.com>    2015-06-23 15:52:56 +0200
committer  Michael S. Tsirkin <mst@redhat.com>         2015-07-07 11:21:37 +0300
commit     f329c74c1e7f08399f0d237f78571eb0ca6a89dd (patch)
tree       dcb64bc949c8a1896bfe510b784c56b2ea90e09f /hw/virtio
parent     be1e50a27d5b6845729ae0854f57f3816cf47edb (diff)
Revert "dataplane: allow virtio-1 devices"
This reverts commit f5a5628cf0b65b223fa0c9031714578dfac4cf04. This was an old patch that had already been superseded by b0e5d90eb ("dataplane: endianness-aware accesses").

Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
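For context, the virtio_tswap16/32/64 calls removed by this revert byte-swap a descriptor field when the device byte order (little-endian for virtio-1, guest-native for legacy devices) differs from the host's. Below is a minimal, self-contained sketch of that kind of conditional swap; the names are hypothetical stand-ins, not QEMU's actual helpers:

    #include <stdint.h>
    #include <stdbool.h>

    /* Detect host byte order at run time (a sketch; real code would
     * typically decide this at compile time). */
    static bool host_is_big_endian(void)
    {
        const union { uint16_t u; uint8_t b[2]; } probe = { .u = 0x0102 };
        return probe.b[0] == 0x01;
    }

    static uint16_t swap16(uint16_t v)
    {
        return (uint16_t)((v >> 8) | (v << 8));
    }

    /* Hypothetical stand-in for a virtio_tswap16()-style helper:
     * swap only when device and host byte order differ. */
    static uint16_t dev_to_host16(bool dev_is_big_endian, uint16_t raw)
    {
        return (dev_is_big_endian == host_is_big_endian()) ? raw : swap16(raw);
    }

As its title suggests, the superseding patch handles endianness in the accesses that load the vring from guest memory, so get_desc()/get_indirect() can use desc->flags, desc->len and desc->addr directly, which is what this revert restores.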
Diffstat (limited to 'hw/virtio')
-rw-r--r--  hw/virtio/dataplane/vring.c  47
1 file changed, 21 insertions, 26 deletions
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index bed9b11c31..07fd69c69e 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -158,18 +158,15 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
}
-static int get_desc(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
+static int get_desc(Vring *vring, VirtQueueElement *elem,
struct vring_desc *desc)
{
unsigned *num;
struct iovec *iov;
hwaddr *addr;
MemoryRegion *mr;
- int is_write = virtio_tswap16(vdev, desc->flags) & VRING_DESC_F_WRITE;
- uint32_t len = virtio_tswap32(vdev, desc->len);
- uint64_t desc_addr = virtio_tswap64(vdev, desc->addr);
- if (is_write) {
+ if (desc->flags & VRING_DESC_F_WRITE) {
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
@@ -193,17 +190,18 @@ static int get_desc(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
}
/* TODO handle non-contiguous memory across region boundaries */
- iov->iov_base = vring_map(&mr, desc_addr, len, is_write);
+ iov->iov_base = vring_map(&mr, desc->addr, desc->len,
+ desc->flags & VRING_DESC_F_WRITE);
if (!iov->iov_base) {
error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
- (uint64_t)desc_addr, len);
+ (uint64_t)desc->addr, desc->len);
return -EFAULT;
}
/* The MemoryRegion is looked up again and unref'ed later, leave the
* ref in place. */
- iov->iov_len = len;
- *addr = desc_addr;
+ iov->iov_len = desc->len;
+ *addr = desc->addr;
*num += 1;
return 0;
}
@@ -225,23 +223,21 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
int ret;
- uint32_t len = virtio_tswap32(vdev, indirect->len);
- uint64_t addr = virtio_tswap64(vdev, indirect->addr);
/* Sanity check */
- if (unlikely(len % sizeof(desc))) {
+ if (unlikely(indirect->len % sizeof(desc))) {
error_report("Invalid length in indirect descriptor: "
"len %#x not multiple of %#zx",
- len, sizeof(desc));
+ indirect->len, sizeof(desc));
vring->broken = true;
return -EFAULT;
}
- count = len / sizeof(desc);
+ count = indirect->len / sizeof(desc);
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
- error_report("Indirect buffer length too big: %d", len);
+ error_report("Indirect buffer length too big: %d", indirect->len);
vring->broken = true;
return -EFAULT;
}
@@ -252,12 +248,12 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
/* Translate indirect descriptor */
desc_ptr = vring_map(&mr,
- addr + found * sizeof(desc),
+ indirect->addr + found * sizeof(desc),
sizeof(desc), false);
if (!desc_ptr) {
error_report("Failed to map indirect descriptor "
"addr %#" PRIx64 " len %zu",
- (uint64_t)addr + found * sizeof(desc),
+ (uint64_t)indirect->addr + found * sizeof(desc),
sizeof(desc));
vring->broken = true;
return -EFAULT;
@@ -275,20 +271,19 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
return -EFAULT;
}
- if (unlikely(virtio_tswap16(vdev, desc.flags)
- & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
error_report("Nested indirect descriptor");
vring->broken = true;
return -EFAULT;
}
- ret = get_desc(vdev, vring, elem, &desc);
+ ret = get_desc(vring, elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
}
- i = virtio_tswap16(vdev, desc.next);
- } while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
+ i = desc.next;
+ } while (desc.flags & VRING_DESC_F_NEXT);
return 0;
}
@@ -389,7 +384,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* Ensure descriptor is loaded before accessing fields */
barrier();
- if (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_INDIRECT) {
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
ret = get_indirect(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
@@ -397,13 +392,13 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
continue;
}
- ret = get_desc(vdev, vring, elem, &desc);
+ ret = get_desc(vring, elem, &desc);
if (ret < 0) {
goto out;
}
- i = virtio_tswap16(vdev, desc.next);
- } while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
+ i = desc.next;
+ } while (desc.flags & VRING_DESC_F_NEXT);
/* On success, increment avail index. */
vring->last_avail_idx++;
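The hunks in get_indirect() and vring_pop() above restore the plain chain walk: handle one descriptor, then follow desc.next for as long as VRING_DESC_F_NEXT is set. A standalone sketch of that loop shape, using the descriptor layout from the virtio spec and omitting the vring_map() guest-memory translation and the error handling the real code performs:

    #include <stdint.h>

    #define VRING_DESC_F_NEXT  1   /* chain continues at desc.next */
    #define VRING_DESC_F_WRITE 2   /* buffer is device-writable */

    struct vring_desc {
        uint64_t addr;   /* guest-physical buffer address */
        uint32_t len;    /* buffer length in bytes */
        uint16_t flags;  /* VRING_DESC_F_* */
        uint16_t next;   /* index of the next descriptor in the chain */
    };

    /* Walk a descriptor chain starting at head, counting device-readable
     * and device-writable buffers. Assumes a well-formed chain; the real
     * code also bounds the walk (at most 2^16 descriptors, as noted in
     * get_indirect()) and maps each buffer through vring_map() before use. */
    static unsigned walk_chain(const struct vring_desc *table, uint16_t head,
                               unsigned *in_num, unsigned *out_num)
    {
        struct vring_desc desc;
        unsigned visited = 0;
        uint16_t i = head;

        *in_num = *out_num = 0;
        do {
            desc = table[i];
            if (desc.flags & VRING_DESC_F_WRITE) {
                (*in_num)++;     /* device writes into this buffer */
            } else {
                (*out_num)++;    /* device reads from this buffer */
            }
            visited++;
            i = desc.next;
        } while (desc.flags & VRING_DESC_F_NEXT);

        return visited;
    }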