author     Eric Auger <eric.auger@redhat.com>      2020-10-29 10:33:02 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>   2020-11-03 19:06:22 +0000
commit     f8fd3ebac358c187d0aba7f922450ed6addf41a8 (patch)
tree       e55b27f32a77644035eb7f9439df3e82c238f4af /block/nvme.c
parent     2387aaced7209872238eaf594997009cffd5501d (diff)
block/nvme: Change size and alignment of prp_list_pages
In preparation of 64kB host page support, let's change the size and
alignment of the prp_list_pages so that the VFIO DMA MAP succeeds with
64kB host page size. We align on the host page size.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20201029093306.1063879-22-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
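To make the sizing rule concrete, here is a minimal standalone C sketch (not QEMU code) of what the patch does: round the per-queue PRP list buffer up to a multiple of the host page size and allocate it with host-page alignment, so that a VFIO DMA mapping done at host-page granularity can cover it. ALIGN_UP stands in for QEMU's QEMU_ALIGN_UP(), aligned_alloc() stands in for qemu_try_memalign(), and the 4 KiB device page size and request count of 32 are illustrative assumptions only.

/* Illustrative sketch only -- not QEMU code. */
#include <stdio.h>
#include <stdlib.h>

/* Round x up to the next multiple of a (same idea as QEMU_ALIGN_UP). */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    size_t device_page_size = 4096;       /* NVMe controller page size (assumed) */
    size_t host_page_size   = 64 * 1024;  /* e.g. 64kB pages on some arm64/ppc64 hosts */
    size_t num_reqs         = 32;         /* stand-in for NVME_NUM_REQS */

    /* Before the patch: size and alignment followed the device page size. */
    size_t raw = device_page_size * num_reqs;

    /* After the patch: round the size up to, and align on, the host page size. */
    size_t bytes = ALIGN_UP(raw, host_page_size);

    /* aligned_alloc() is a portable stand-in for qemu_try_memalign(); C11
     * requires the size to be a multiple of the alignment, which the
     * ALIGN_UP above guarantees. */
    unsigned char *prp_list_pages = aligned_alloc(host_page_size, bytes);
    if (!prp_list_pages) {
        return 1;
    }

    printf("raw size %zu -> mapped size %zu (host page %zu)\n",
           raw, bytes, host_page_size);
    free(prp_list_pages);
    return 0;
}

With these example numbers, 4096 * 32 is 128 KiB, already a multiple of 64kB, so only the alignment changes; with other request counts the size itself would also be rounded up.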
Diffstat (limited to 'block/nvme.c')
-rw-r--r--  block/nvme.c  11
1 file changed, 6 insertions, 5 deletions
diff --git a/block/nvme.c b/block/nvme.c
index 4a8589d2d2..e807dd56df 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -215,6 +215,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     int i, r;
     NVMeQueuePair *q;
     uint64_t prp_list_iova;
+    size_t bytes;
 
     q = g_try_new0(NVMeQueuePair, 1);
     if (!q) {
@@ -222,19 +223,19 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     }
     trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                  event_notifier_get_fd(s->irq_notifier));
-    q->prp_list_pages = qemu_try_memalign(s->page_size,
-                                          s->page_size * NVME_NUM_REQS);
+    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
+                          qemu_real_host_page_size);
+    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
     if (!q->prp_list_pages) {
         goto fail;
     }
-    memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
+    memset(q->prp_list_pages, 0, bytes);
     qemu_mutex_init(&q->lock);
     q->s = s;
     q->index = idx;
     qemu_co_queue_init(&q->free_req_queue);
     q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
-    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
-                          s->page_size * NVME_NUM_REQS,
+    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                           false, &prp_list_iova);
     if (r) {
         goto fail;