diff options
author | Tom <tomut@yahoo.com> | 2020-11-02 11:16:01 -0700 |
---|---|---|
committer | Andreas Kling <kling@serenityos.org> | 2020-11-12 18:04:30 +0100 |
commit | e445ff670dd1c715945832147ae3fb8724485ad6 (patch) | |
tree | b75644638c8a13b85288dd33d6eadbe3714ca6c7 /Kernel/Devices/AsyncDeviceRequest.cpp | |
parent | 91db31880faae9526693670bb76e186a59959887 (diff) | |
download | serenity-e445ff670dd1c715945832147ae3fb8724485ad6.zip |
Kernel: Implement an asynchronous device request stack
This allows issuing asynchronous requests for devices and waiting
on the completion of the request. The requests can cascade into
multiple sub-requests.
Since IRQs may complete at any time, if the current process is no
longer the same as the one that started the request, we need to switch
the paging context before accessing user buffers.
Change the PATA driver to use this model.
Diffstat (limited to 'Kernel/Devices/AsyncDeviceRequest.cpp')
-rw-r--r-- | Kernel/Devices/AsyncDeviceRequest.cpp | 175 |
1 file changed, 175 insertions, 0 deletions
diff --git a/Kernel/Devices/AsyncDeviceRequest.cpp b/Kernel/Devices/AsyncDeviceRequest.cpp new file mode 100644 index 0000000000..a114584085 --- /dev/null +++ b/Kernel/Devices/AsyncDeviceRequest.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2020, The SerenityOS developers. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <Kernel/Devices/AsyncDeviceRequest.h> +#include <Kernel/Devices/Device.h> + +namespace Kernel { + +AsyncDeviceRequest::AsyncDeviceRequest(Device& device) + : m_device(device) + , m_process(*Process::current()) +{ +} + +AsyncDeviceRequest::~AsyncDeviceRequest() +{ + { + ScopedSpinLock lock(m_lock); + ASSERT(is_completed_result(m_result)); + ASSERT(m_sub_requests_pending.is_empty()); + } + + // We should not need any locking here anymore. The destructor should + // only be called until either wait() or cancel() (once implemented) returned. + // At that point no sub-request should be adding more requests and all + // sub-requests should be completed (either succeeded, failed, or cancelled). + // Which means there should be no more pending sub-requests and the + // entire AsyncDeviceRequest hirarchy should be immutable. + for (auto& sub_request : m_sub_requests_complete) { + ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore + ASSERT(sub_request.m_parent_request == this); + sub_request.m_parent_request = nullptr; + } +} + +void AsyncDeviceRequest::request_finished() +{ + if (m_parent_request) + m_parent_request->sub_request_finished(*this); + + // Trigger processing the next request + m_device.process_next_queued_request({}, *this); + + // Wake anyone who may be waiting + m_queue.wake_all(); +} + +auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult +{ + ASSERT(!m_parent_request); + auto request_result = get_request_result(); + if (is_completed_result(request_result)) + return { request_result, Thread::BlockResult::NotBlocked }; + auto wait_result = Thread::current()->wait_on(m_queue, name(), timeout); + return { get_request_result(), wait_result }; +} + +auto AsyncDeviceRequest::get_request_result() const -> RequestResult +{ + ScopedSpinLock lock(m_lock); + return m_result; +} + +void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request) +{ + // Sub-requests cannot be 
for the same device + ASSERT(&m_device != &sub_request->m_device); + ASSERT(sub_request->m_parent_request == nullptr); + sub_request->m_parent_request = this; + + bool should_start; + { + ScopedSpinLock lock(m_lock); + ASSERT(!is_completed_result(m_result)); + m_sub_requests_pending.append(sub_request); + should_start = (m_result == Started); + } + if (should_start) + sub_request->do_start(); +} + +void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request) +{ + bool all_completed; + { + ScopedSpinLock lock(m_lock); + ASSERT(m_result == Started); + size_t index; + for (index = 0; index < m_sub_requests_pending.size(); index++) { + if (&m_sub_requests_pending[index] == &sub_request) { + NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]); + m_sub_requests_pending.remove(index); + m_sub_requests_complete.append(move(request)); + break; + } + } + ASSERT(index < m_sub_requests_pending.size()); + all_completed = m_sub_requests_pending.is_empty(); + if (all_completed) { + // Aggregate any errors + bool any_failures = false; + bool any_memory_faults = false; + for (index = 0; index < m_sub_requests_complete.size(); index++) { + auto& sub_request = m_sub_requests_complete[index]; + auto sub_result = sub_request.get_request_result(); + ASSERT(is_completed_result(sub_result)); + switch (sub_result) { + case Failure: + any_failures = true; + break; + case MemoryFault: + any_memory_faults = true; + break; + default: + break; + } + if (any_failures && any_memory_faults) + break; // Stop checking if all error conditions were found + } + if (any_failures) + m_result = Failure; + else if (any_memory_faults) + m_result = MemoryFault; + else + m_result = Success; + } + } + if (all_completed) + request_finished(); +} + +void AsyncDeviceRequest::complete(RequestResult result) +{ + ASSERT(result == Success || result == Failure || result == MemoryFault); + ScopedCritical critical; + { + ScopedSpinLock lock(m_lock); + ASSERT(m_result == Started); + 
m_result = result; + } + if (Processor::current().in_irq()) { + ref(); // Make sure we don't get freed + Processor::deferred_call_queue([this]() { + request_finished(); + unref(); + }); + } else { + request_finished(); + } +} + +} |