Diffstat (limited to 'Kernel/Storage')
 Kernel/Storage/NVMe/NVMeQueue.cpp | 10 ++++------
 Kernel/Storage/NVMe/NVMeQueue.h   | 15 ++++++++++++++-
 2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/Kernel/Storage/NVMe/NVMeQueue.cpp b/Kernel/Storage/NVMe/NVMeQueue.cpp
index 04408f4dc7..6736cf8043 100644
--- a/Kernel/Storage/NVMe/NVMeQueue.cpp
+++ b/Kernel/Storage/NVMe/NVMeQueue.cpp
@@ -75,8 +75,6 @@ u32 NVMeQueue::process_cq()
         if (m_admin_queue == false) {
             // As the block layer calls are now sync (as we wait on each requests),
             // everything is operated on a single request similar to BMIDE driver.
-            // TODO: Remove this constraint eventually.
-            VERIFY(cmdid == m_prev_sq_tail);
             if (m_current_request) {
                 complete_current_request(status);
             }
@@ -92,9 +90,6 @@ u32 NVMeQueue::process_cq()
 void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 {
     SpinlockLocker lock(m_sq_lock);
-    // For now let's use sq tail as a unique command id.
-    sub.cmdid = m_sq_tail;
-    m_prev_sq_tail = m_sq_tail;
     memcpy(&m_sqe_array[m_sq_tail], &sub, sizeof(NVMeSubmission));
     {
@@ -114,7 +109,8 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
 {
     // For now let's use sq tail as a unique command id.
     u16 cqe_cid;
-    u16 cid = m_sq_tail;
+    u16 cid = get_request_cid();
+    sub.cmdid = cid;
     submit_sqe(sub);
     do {
@@ -145,6 +141,7 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     // No. of lbas is 0 based
     sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
+    sub.cmdid = get_request_cid();
     full_memory_barrier();
     submit_sqe(sub);
@@ -166,6 +163,7 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     // No. of lbas is 0 based
     sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
+    sub.cmdid = get_request_cid();
     full_memory_barrier();
     submit_sqe(sub);
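
Note on the .cpp side: with the sq-tail-based cid assignment removed from submit_sqe(), each submission path (read(), write(), and submit_sync_sqe()) now stamps its own command id via get_request_cid() before submitting. A minimal sketch of the resulting caller-side pattern; the opcode constant and the abbreviated setup are placeholders here, not the full read() body from this patch:

    // Illustrative only: the real read() also fills nsid, slba, length and
    // the PRP pointer, as shown in the hunk above.
    NVMeSubmission sub {};
    sub.op = OP_NVME_READ;         // placeholder opcode
    sub.cmdid = get_request_cid(); // callers now own cid assignment
    full_memory_barrier();         // barrier before submission, as in read()/write() above
    submit_sqe(sub);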
diff --git a/Kernel/Storage/NVMe/NVMeQueue.h b/Kernel/Storage/NVMe/NVMeQueue.h
index 46f386c726..617dfdfa7f 100644
--- a/Kernel/Storage/NVMe/NVMeQueue.h
+++ b/Kernel/Storage/NVMe/NVMeQueue.h
@@ -44,6 +44,19 @@ protected:
     }
     NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    [[nodiscard]] u32 get_request_cid()
+    {
+        u32 expected_tag = m_tag.load(AK::memory_order_acquire);
+
+        for (;;) {
+            u32 cid = expected_tag + 1;
+            if (cid == m_qdepth)
+                cid = 0;
+            if (m_tag.compare_exchange_strong(expected_tag, cid, AK::memory_order_acquire))
+                return cid;
+        }
+    }
+
 private:
     bool cqe_available();
     void update_cqe_head();
@@ -63,10 +76,10 @@ private:
     u16 m_qid {};
     u8 m_cq_valid_phase { 1 };
     u16 m_sq_tail {};
-    u16 m_prev_sq_tail {};
     u16 m_cq_head {};
     bool m_admin_queue { false };
     u32 m_qdepth {};
+    Atomic<u32> m_tag { 0 }; // used for the cid in a submission queue entry
     Spinlock<LockRank::Interrupts> m_sq_lock {};
     OwnPtr<Memory::Region> m_cq_dma_region;
     Vector<NonnullRefPtr<Memory::PhysicalPage>> m_cq_dma_page;
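
The heart of the change is get_request_cid(): a lock-free counter that wraps at the queue depth, so generated cids always fall in [0, m_qdepth). A standalone sketch of the same compare-and-swap technique, using std::atomic in place of AK::Atomic; QUEUE_DEPTH and next_cid() are illustrative stand-ins for m_qdepth and the member function, not names from the patch:

#include <atomic>
#include <cstdint>

constexpr uint32_t QUEUE_DEPTH = 64; // stand-in for m_qdepth
std::atomic<uint32_t> g_tag { 0 };   // stand-in for m_tag

uint32_t next_cid()
{
    uint32_t expected = g_tag.load(std::memory_order_acquire);
    for (;;) {
        uint32_t cid = expected + 1;
        if (cid == QUEUE_DEPTH)
            cid = 0; // wrap so cids stay in [0, QUEUE_DEPTH)
        // On failure, compare_exchange_strong refreshes `expected` with the
        // value another thread installed, and the loop retries from there.
        if (g_tag.compare_exchange_strong(expected, cid, std::memory_order_acq_rel))
            return cid;
    }
}

This scheme only guarantees unique cids while fewer than QUEUE_DEPTH commands are in flight; that appears to hold here because, per the comment in process_cq(), the block layer currently waits on each request, so a single I/O request is outstanding at a time.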