/*
 * Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/Delay.h>
#include <Kernel/StdLib.h>
#include <Kernel/Storage/NVMe/NVMeController.h>
#include <Kernel/Storage/NVMe/NVMeInterruptQueue.h>
#include <Kernel/Storage/NVMe/NVMePollQueue.h>
#include <Kernel/Storage/NVMe/NVMeQueue.h>
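
// Shared implementation of an NVMe submission/completion queue pair. The interrupt-driven
// (NVMeInterruptQueue) and polled (NVMePollQueue) variants derive from this class, which
// handles the submission/completion rings, doorbell updates and per-command request tracking.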

namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& device, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs, QueueType queue_type)
{
    // Note: Allocate a DMA region for read/write operations. For now, requests don't exceed 4096 bytes (the storage device layer takes care of that).
    RefPtr<Memory::PhysicalPage> rw_dma_page;
    auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
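    // Polled queues process completions by polling the CQ and never take an IRQ, while
    // interrupt-driven queues register an IRQ handler for completion processing.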
    if (queue_type == QueueType::Polled) {
        auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), *rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
        return queue;
    }
    auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(device, move(rw_dma_region), *rw_dma_page, qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
    return queue;
}

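// Map the submission and completion queue DMA regions as spans of NVMeSubmission /
// NVMeCompletion entries and pre-allocate the request tracking table up to the queue depth.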
UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
    : m_rw_dma_region(move(rw_dma_region))
    , m_qid(qid)
    , m_admin_queue(qid == 0)
    , m_qdepth(q_depth)
    , m_cq_dma_region(move(cq_dma_region))
    , m_sq_dma_region(move(sq_dma_region))
    , m_db_regs(move(db_regs))
    , m_rw_dma_page(rw_dma_page)
{
    m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
    m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
    m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
}

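// A completion entry is new only while its phase tag matches the phase the driver currently
// expects; entries with a stale phase tag have already been consumed.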
bool NVMeQueue::cqe_available()
{
    return PHASE_TAG(m_cqe_array[m_cq_head].status) == m_cq_valid_phase;
}

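// Advance the completion queue head; once it wraps back to slot 0, flip the expected
// phase tag so the next pass around the ring can be distinguished from the previous one.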
void NVMeQueue::update_cqe_head()
{
    // To prevent overflow, use a temp variable
    u32 temp_cq_head = m_cq_head + 1;
    if (temp_cq_head == m_qdepth) {
        m_cq_head = 0;
        m_cq_valid_phase ^= 1;
    } else {
        m_cq_head = temp_cq_head;
    }
}

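// Drain all currently available completion entries, hand each status back to the matching
// tracked request, and ring the completion queue doorbell once at the end.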
u32 NVMeQueue::process_cq()
{
    u32 nr_of_processed_cqes = 0;
    while (cqe_available()) {
        u16 status;
        u16 cmdid;
        ++nr_of_processed_cqes;
        status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
        cmdid = m_cqe_array[m_cq_head].command_id;
        dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);

        if (!m_requests.contains(cmdid)) {
            dmesgln("Bogus cmd id: {}", cmdid);
            VERIFY_NOT_REACHED();
        }
        complete_current_request(cmdid, status);
        update_cqe_head();
    }
    if (nr_of_processed_cqes) {
        update_cq_doorbell();
    }
    return nr_of_processed_cqes;
}

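// Copy the submission entry into the submission ring, advance the tail (wrapping at the
// queue depth) and ring the submission queue doorbell so the controller picks the entry up.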
void NVMeQueue::submit_sqe(NVMeSubmission& sub)
{
    SpinlockLocker lock(m_sq_lock);

    memcpy(&m_sqe_array[m_sq_tail], &sub, sizeof(NVMeSubmission));
    {
        u32 temp_sq_tail = m_sq_tail + 1;
        if (temp_sq_tail == m_qdepth)
            m_sq_tail = 0;
        else
            m_sq_tail = temp_sq_tail;
    }

    dbgln_if(NVME_DEBUG, "NVMe: Submission with command identifier {}. SQ_TAIL: {}", sub.cmdid, m_sq_tail);
    full_memory_barrier();
    update_sq_doorbell();
}

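// Submit a command and block on a WaitQueue until its completion has been processed; the
// callback registered in m_requests records the status and wakes the waiter. This path is
// mainly used for admin commands.
//
// A minimal usage sketch, assuming the opcode and union member names from NVMeDefinitions.h
// (prp_buffer_paddr is a hypothetical DMA buffer address supplied by the caller):
//
//     NVMeSubmission sub {};
//     sub.op = OP_ADMIN_IDENTIFY;
//     sub.identify.data_ptr.prp1 = prp_buffer_paddr;
//     u16 status = admin_queue->submit_sync_sqe(sub); // Blocks until the completion arrives.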
u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
{
    // Obtain a unique command id (cid) for this submission.
    u16 cmd_status;
    u16 cid = get_request_cid();
    sub.cmdid = cid;

    {
        SpinlockLocker req_lock(m_request_lock);

        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
            VERIFY_NOT_REACHED();
        m_requests.set(sub.cmdid, { nullptr, true, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
    }
    submit_sqe(sub);

    // FIXME: Only sync submissions (usually used for admin commands) use WaitQueue-based IO. Eventually we need to
    //  move this logic into the block layer instead of sprinkling it throughout the driver code.
    m_sync_wait_queue.wait_forever("NVMe sync submit"sv);
    return cmd_status;
}

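// Build an NVMe READ command: the starting LBA and the (0-based) LBA count are encoded
// little-endian, and PRP1 points at the per-queue bounce page that the completion path
// copies into the caller's buffer once the command finishes.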
void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
{
    NVMeSubmission sub {};
    sub.op = OP_NVME_READ;
    sub.rw.nsid = nsid;
    sub.rw.slba = AK::convert_between_host_and_little_endian(index);
    // The number of LBAs is 0-based.
    sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
    sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
    sub.cmdid = get_request_cid();

    {
        SpinlockLocker req_lock(m_request_lock);
        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
            VERIFY_NOT_REACHED();
        m_requests.set(sub.cmdid, { request, true, nullptr });
    }

    full_memory_barrier();
    submit_sqe(sub);
}

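// Build an NVMe WRITE command: the caller's data is copied from the request buffer into
// the per-queue DMA bounce page before submission, and PRP1 points the controller at that page.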
void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
{
    NVMeSubmission sub {};

    sub.op = OP_NVME_WRITE;
    sub.rw.nsid = nsid;
    sub.rw.slba = AK::convert_between_host_and_little_endian(index);
    // The number of LBAs is 0-based.
    sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
    sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
    sub.cmdid = get_request_cid();

    {
        SpinlockLocker req_lock(m_request_lock);
        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
            VERIFY_NOT_REACHED();
        m_requests.set(sub.cmdid, { request, true, nullptr });
    }

    if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
        complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);
        return;
    }

    full_memory_barrier();
    submit_sqe(sub);
}

UNMAP_AFTER_INIT NVMeQueue::~NVMeQueue() = default;
}