diff options
author | Pankaj Raghav <p.raghav@samsung.com> | 2023-03-29 10:25:04 +0200 |
---|---|---|
committer | Jelle Raaijmakers <jelle@gmta.nl> | 2023-04-05 12:45:27 +0200 |
commit | 3fe7bda0212740471473135757d82201fde58806 (patch) | |
tree | 839e85c67ca00534a12091db2ddea91bc138ecbe /Kernel/Storage | |
parent | e219662ce058ff0544a4735dc4b303d48dd7f6d7 (diff) | |
download | serenity-3fe7bda0212740471473135757d82201fde58806.zip |
Kernel/NVMe: Use an Atomic for command id instead of sq index
Using sq_tail as the cid makes an inherent assumption that we send only
one IO at a time. Use an atomic variable instead for the command id of a
submission queue entry.
As sq_tail is no longer used as the cid, remove m_prev_sq_tail, which
used to hold the last used sq_tail value.
Diffstat (limited to 'Kernel/Storage')
-rw-r--r-- | Kernel/Storage/NVMe/NVMeQueue.cpp | 10 | ||||
-rw-r--r-- | Kernel/Storage/NVMe/NVMeQueue.h | 15 |
2 files changed, 18 insertions, 7 deletions
diff --git a/Kernel/Storage/NVMe/NVMeQueue.cpp b/Kernel/Storage/NVMe/NVMeQueue.cpp index 04408f4dc7..6736cf8043 100644 --- a/Kernel/Storage/NVMe/NVMeQueue.cpp +++ b/Kernel/Storage/NVMe/NVMeQueue.cpp @@ -75,8 +75,6 @@ u32 NVMeQueue::process_cq() if (m_admin_queue == false) { // As the block layer calls are now sync (as we wait on each requests), // everything is operated on a single request similar to BMIDE driver. - // TODO: Remove this constraint eventually. - VERIFY(cmdid == m_prev_sq_tail); if (m_current_request) { complete_current_request(status); } @@ -92,9 +90,6 @@ u32 NVMeQueue::process_cq() void NVMeQueue::submit_sqe(NVMeSubmission& sub) { SpinlockLocker lock(m_sq_lock); - // For now let's use sq tail as a unique command id. - sub.cmdid = m_sq_tail; - m_prev_sq_tail = m_sq_tail; memcpy(&m_sqe_array[m_sq_tail], &sub, sizeof(NVMeSubmission)); { @@ -114,7 +109,8 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub) { // For now let's use sq tail as a unique command id. u16 cqe_cid; - u16 cid = m_sq_tail; + u16 cid = get_request_cid(); + sub.cmdid = cid; submit_sqe(sub); do { @@ -145,6 +141,7 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 // No. of lbas is 0 based sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF); sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr())); + sub.cmdid = get_request_cid(); full_memory_barrier(); submit_sqe(sub); @@ -166,6 +163,7 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 // No. 
of lbas is 0 based sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF); sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr())); + sub.cmdid = get_request_cid(); full_memory_barrier(); submit_sqe(sub); diff --git a/Kernel/Storage/NVMe/NVMeQueue.h b/Kernel/Storage/NVMe/NVMeQueue.h index 46f386c726..617dfdfa7f 100644 --- a/Kernel/Storage/NVMe/NVMeQueue.h +++ b/Kernel/Storage/NVMe/NVMeQueue.h @@ -44,6 +44,19 @@ protected: } NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs); + [[nodiscard]] u32 get_request_cid() + { + u32 expected_tag = m_tag.load(AK::memory_order_acquire); + + for (;;) { + u32 cid = expected_tag + 1; + if (cid == m_qdepth) + cid = 0; + if (m_tag.compare_exchange_strong(expected_tag, cid, AK::memory_order_acquire)) + return cid; + } + } + private: bool cqe_available(); void update_cqe_head(); @@ -63,10 +76,10 @@ private: u16 m_qid {}; u8 m_cq_valid_phase { 1 }; u16 m_sq_tail {}; - u16 m_prev_sq_tail {}; u16 m_cq_head {}; bool m_admin_queue { false }; u32 m_qdepth {}; + Atomic<u32> m_tag { 0 }; // used for the cid in a submission queue entry Spinlock<LockRank::Interrupts> m_sq_lock {}; OwnPtr<Memory::Region> m_cq_dma_region; Vector<NonnullRefPtr<Memory::PhysicalPage>> m_cq_dma_page; |