path: root/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
/*
 * Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include "NVMeInterruptQueue.h"
#include <Kernel/Devices/BlockDevice.h>
#include "NVMeDefinitions.h"
#include <Kernel/WorkQueue.h>

namespace Kernel {

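// Interrupt-driven NVMe I/O queue: completions are reaped from the IRQ handler
// rather than by polling. The IRQ line is enabled as soon as the queue is constructed.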
UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
    : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
    , IRQHandler(irq)
{
    enable_irq();
}

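// Called when the queue's interrupt fires: drain the completion queue under the
// request lock and report whether any completions were actually processed.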
bool NVMeInterruptQueue::handle_irq(RegisterState const&)
{
    SpinlockLocker lock(m_request_lock);
    return process_cq() != 0;
}

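// Submissions need no interrupt-specific handling; defer to the base class.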
void NVMeInterruptQueue::submit_sqe(NVMeSubmission& sub)
{
    NVMeQueue::submit_sqe(sub);
}

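// Runs with m_request_lock held (from the IRQ path). Completing the request may
// copy data back into the requester's buffer, which cannot safely be done in
// interrupt context, so the actual completion is deferred to the I/O work queue.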
void NVMeInterruptQueue::complete_current_request(u16 status)
{
    VERIFY(m_request_lock.is_locked());

    g_io_work->queue([this, status]() {
        SpinlockLocker lock(m_request_lock);
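        // Take a local reference to the request and clear the member so the
        // queue can accept a new request while this one is being completed.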
        auto current_request = m_current_request;
        m_current_request.clear();
        if (status) {
            lock.unlock();
            current_request->complete(AsyncBlockDeviceRequest::Failure);
            return;
        }
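        // For reads, copy the data the controller DMA'd into the bounce buffer
        // back into the requester's buffer.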
        if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
            if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
                lock.unlock();
                current_request->complete(AsyncDeviceRequest::MemoryFault);
                return;
            }
        }
        lock.unlock();
        current_request->complete(AsyncDeviceRequest::Success);
    });
}
}