author     Liav A <liavalb@gmail.com>  2022-01-23 20:30:45 +0200
committer  Linus Groh <mail@linusgroh.de>  2022-01-23 20:56:28 +0000
commit     fc2c2c8a6dc64aab39ac0f7ff98ff6ebbdc29663 (patch)
tree       2e253cd549a071e888599c207ac3e507a8340dac /Kernel/Storage
parent     0778043d738a6b1b438ec6f327ea7c6aea242079 (diff)
Kernel/Storage: Remove NVMeQueue DMA buffer create method
Instead, try to allocate the DMA buffer before constructing the NVMeQueue. This allows us to fail early, before allocating and constructing the heavier NVMeQueue object, if the DMA buffer allocation fails.
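Condensed from the diff below, the fail-early pattern looks roughly like this (a sketch, not the verbatim patch; the elided parameters are the queue's other DMA regions, pages, and doorbell registers):

    ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth /* , ... */)
    {
        // Allocate the fallible RW DMA buffer first; if this fails we return the
        // error without ever constructing the heavier NVMeQueue object.
        RefPtr<Memory::PhysicalPage> rw_dma_page;
        auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv,
            Memory::Region::Access::ReadWrite, rw_dma_page));
        // Only now pay for the NVMeQueue allocation and construction.
        auto queue = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeQueue(
            move(rw_dma_region), *rw_dma_page, qid, irq, q_depth /* , ... */)));
        return queue;
    }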
Diffstat (limited to 'Kernel/Storage')
-rw-r--r--  Kernel/Storage/NVMe/NVMeQueue.cpp  18
-rw-r--r--  Kernel/Storage/NVMe/NVMeQueue.h     8
2 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/Kernel/Storage/NVMe/NVMeQueue.cpp b/Kernel/Storage/NVMe/NVMeQueue.cpp
index 26a2ab8f90..8871fce23e 100644
--- a/Kernel/Storage/NVMe/NVMeQueue.cpp
+++ b/Kernel/Storage/NVMe/NVMeQueue.cpp
@@ -15,12 +15,14 @@ namespace Kernel {
ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
{
- auto queue = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeQueue(qid, irq, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
- TRY(queue->create());
+ // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
+ RefPtr<Memory::PhysicalPage> rw_dma_page;
+ auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
+ auto queue = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeQueue(move(rw_dma_region), *rw_dma_page, qid, irq, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
return queue;
}
-UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
: IRQHandler(irq)
, m_qid(qid)
, m_admin_queue(qid == 0)
@@ -30,7 +32,9 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memor
, m_cq_dma_page(cq_dma_page)
, m_sq_dma_region(move(sq_dma_region))
, m_sq_dma_page(sq_dma_page)
+ , m_rw_dma_region(move(rw_dma_region))
, m_db_regs(move(db_regs))
+ , m_rw_dma_page(rw_dma_page)
, m_current_request(nullptr)
{
@@ -38,14 +42,6 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memor
m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
}
-UNMAP_AFTER_INIT ErrorOr<void> NVMeQueue::create()
-{
- // DMA region for RW operation. For now the requests don't exceed more than 4096 bytes(Storage device takes of it)
- auto buffer = TRY(MM.allocate_dma_buffer_page("NVMe Queue"sv, Memory::Region::Access::ReadWrite, m_rw_dma_page));
- m_rw_dma_region = move(buffer);
- return {};
-}
-
bool NVMeQueue::cqe_available()
{
return PHASE_TAG(m_cqe_array[m_cq_head].status) == m_cq_valid_phase;
diff --git a/Kernel/Storage/NVMe/NVMeQueue.h b/Kernel/Storage/NVMe/NVMeQueue.h
index fdec1be7a4..85dbd7ddc3 100644
--- a/Kernel/Storage/NVMe/NVMeQueue.h
+++ b/Kernel/Storage/NVMe/NVMeQueue.h
@@ -6,6 +6,7 @@
#pragma once
+#include <AK/NonnullRefPtr.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/OwnPtr.h>
#include <AK/RefCounted.h>
@@ -30,7 +31,6 @@ class NVMeQueue : public IRQHandler
, public RefCounted<NVMeQueue> {
public:
static ErrorOr<NonnullRefPtr<NVMeQueue>> try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
- ErrorOr<void> create();
bool is_admin_queue() { return m_admin_queue; };
void submit_sqe(NVMeSubmission&);
u16 submit_sync_sqe(NVMeSubmission&);
@@ -40,7 +40,7 @@ public:
void disable_interrupts() { disable_irq(); };
private:
- NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
+ NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
virtual bool handle_irq(const RegisterState&) override;
@@ -73,9 +73,9 @@ private:
OwnPtr<Memory::Region> m_sq_dma_region;
NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
Span<NVMeCompletion> m_cqe_array;
- OwnPtr<Memory::Region> m_rw_dma_region;
+ NonnullOwnPtr<Memory::Region> m_rw_dma_region;
Memory::TypedMapping<volatile DoorbellRegister> m_db_regs;
- RefPtr<Memory::PhysicalPage> m_rw_dma_page;
+ NonnullRefPtr<Memory::PhysicalPage> m_rw_dma_page;
Spinlock m_request_lock;
RefPtr<AsyncBlockDeviceRequest> m_current_request;
};
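For context, a caller now obtains a fully initialized queue in a single fallible step; the call site below is a hypothetical sketch (the surrounding controller code is not part of this diff):

    // Allocation of the RW DMA buffer happens inside try_create(); if it fails,
    // TRY propagates the error and no NVMeQueue is ever constructed.
    auto queue = TRY(NVMeQueue::try_create(qid, irq, q_depth,
        move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs)));
    // No separate queue->create() call is needed anymore.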