author | Tom <tomut@yahoo.com> | 2020-11-02 11:16:01 -0700
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2020-11-12 18:04:30 +0100
commit | e445ff670dd1c715945832147ae3fb8724485ad6 (patch) |
tree | b75644638c8a13b85288dd33d6eadbe3714ca6c7 /Kernel/Devices |
parent | 91db31880faae9526693670bb76e186a59959887 (diff) |
download | serenity-e445ff670dd1c715945832147ae3fb8724485ad6.zip |
Kernel: Implement an asynchronous device request stack
This allows issuing asynchronous requests for devices and waiting
on the completion of the request. The requests can cascade into
multiple sub-requests.
Since IRQs may complete at any time, if the current process is no
longer the same one that issued the request, we need to switch the
paging context before accessing user buffers.
Change the PATA driver to use this model.
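To make the model concrete before reading the diff: a caller no longer invokes a blocking `read_blocks()` virtual; it builds a request object, queues it on the device, and waits for completion. A minimal caller-side sketch using only the API introduced by this patch (the helper function itself is illustrative, not part of the change; it mirrors what `BlockDevice::read_block()` does below):

```cpp
#include <Kernel/Devices/BlockDevice.h>

namespace Kernel {

// Illustrative helper: issue a one-block asynchronous read and block
// the current thread until it completes.
static bool read_one_block(BlockDevice& device, unsigned index, UserOrKernelBuffer& buffer)
{
    // make_request<T>() enqueues the request on the device's queue and
    // starts it immediately if no other request is in flight.
    auto request = device.make_request<AsyncBlockDeviceRequest>(
        AsyncBlockDeviceRequest::Read, index, 1, buffer, 512);

    // wait() blocks on the request's WaitQueue until complete() is
    // called -- possibly from an IRQ handler, in which case the wake-up
    // happens via a deferred call outside IRQ context.
    return request->wait().request_result() == AsyncDeviceRequest::Success;
}

}
```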
Diffstat (limited to 'Kernel/Devices')
-rw-r--r-- | Kernel/Devices/AsyncDeviceRequest.cpp | 175
-rw-r--r-- | Kernel/Devices/AsyncDeviceRequest.h | 175
-rw-r--r-- | Kernel/Devices/BXVGADevice.h | 3
-rw-r--r-- | Kernel/Devices/BlockDevice.cpp | 56
-rw-r--r-- | Kernel/Devices/BlockDevice.h | 45
-rw-r--r-- | Kernel/Devices/Device.cpp | 17
-rw-r--r-- | Kernel/Devices/Device.h | 24
-rw-r--r-- | Kernel/Devices/DiskPartition.cpp | 24
-rw-r--r-- | Kernel/Devices/DiskPartition.h | 3
-rw-r--r-- | Kernel/Devices/EBRPartitionTable.cpp | 3
-rw-r--r-- | Kernel/Devices/GPTPartitionTable.cpp | 2
-rw-r--r-- | Kernel/Devices/MBVGADevice.h | 3
-rw-r--r-- | Kernel/Devices/PATAChannel.cpp | 364
-rw-r--r-- | Kernel/Devices/PATAChannel.h | 25
-rw-r--r-- | Kernel/Devices/PATADiskDevice.cpp | 124
-rw-r--r-- | Kernel/Devices/PATADiskDevice.h | 10
16 files changed, 808 insertions, 245 deletions
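On the device side, drivers now implement a single `start_request()` virtual and report completion asynchronously. The skeleton below is hypothetical (the class name, device numbers, and the IRQ hook are invented for illustration), but the overridden signatures and the `complete()` contract are the ones added in this patch; the stub `File` overrides follow the same pattern as `BXVGADevice` in the diff:

```cpp
#include <Kernel/Devices/BlockDevice.h>

namespace Kernel {

// Hypothetical driver skeleton showing the new contract: start_request()
// kicks off the transfer, and complete() is called later -- often from an
// IRQ handler, as the reworked PATAChannel does below.
class ExampleDiskDevice : public BlockDevice {
public:
    ExampleDiskDevice()
        : BlockDevice(42, 0, 512) // hypothetical major/minor, 512-byte blocks
    {
    }

    virtual void start_request(AsyncBlockDeviceRequest& request) override
    {
        // Only one request runs at a time: Device::make_request() queues
        // the rest, and process_next_queued_request() starts the next one
        // after this request finishes.
        m_current_request = &request;
        // ... program the hardware here; an IRQ fires on completion ...
    }

    // Minimal File overrides so the sketch is complete; a real driver
    // implements these properly.
    virtual const char* class_name() const override { return "ExampleDiskDevice"; }
    virtual bool can_read(const FileDescription&, size_t) const override { return true; }
    virtual bool can_write(const FileDescription&, size_t) const override { return true; }
    virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; }
    virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; }

private:
    void on_transfer_irq(bool ok) // hypothetical IRQ-handler hook
    {
        auto* request = m_current_request;
        m_current_request = nullptr;
        // complete() is IRQ-safe: when called in_irq(), it defers waking
        // waiters and starting the next queued request to a deferred call.
        request->complete(ok ? AsyncDeviceRequest::Success : AsyncDeviceRequest::Failure);
    }

    AsyncBlockDeviceRequest* m_current_request { nullptr };
};

}
```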
diff --git a/Kernel/Devices/AsyncDeviceRequest.cpp b/Kernel/Devices/AsyncDeviceRequest.cpp new file mode 100644 index 0000000000..a114584085 --- /dev/null +++ b/Kernel/Devices/AsyncDeviceRequest.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2020, The SerenityOS developers. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <Kernel/Devices/AsyncDeviceRequest.h> +#include <Kernel/Devices/Device.h> + +namespace Kernel { + +AsyncDeviceRequest::AsyncDeviceRequest(Device& device) + : m_device(device) + , m_process(*Process::current()) +{ +} + +AsyncDeviceRequest::~AsyncDeviceRequest() +{ + { + ScopedSpinLock lock(m_lock); + ASSERT(is_completed_result(m_result)); + ASSERT(m_sub_requests_pending.is_empty()); + } + + // We should not need any locking here anymore. The destructor should + // only be called until either wait() or cancel() (once implemented) returned. + // At that point no sub-request should be adding more requests and all + // sub-requests should be completed (either succeeded, failed, or cancelled). + // Which means there should be no more pending sub-requests and the + // entire AsyncDeviceRequest hirarchy should be immutable. 
+ for (auto& sub_request : m_sub_requests_complete) { + ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore + ASSERT(sub_request.m_parent_request == this); + sub_request.m_parent_request = nullptr; + } +} + +void AsyncDeviceRequest::request_finished() +{ + if (m_parent_request) + m_parent_request->sub_request_finished(*this); + + // Trigger processing the next request + m_device.process_next_queued_request({}, *this); + + // Wake anyone who may be waiting + m_queue.wake_all(); +} + +auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult +{ + ASSERT(!m_parent_request); + auto request_result = get_request_result(); + if (is_completed_result(request_result)) + return { request_result, Thread::BlockResult::NotBlocked }; + auto wait_result = Thread::current()->wait_on(m_queue, name(), timeout); + return { get_request_result(), wait_result }; +} + +auto AsyncDeviceRequest::get_request_result() const -> RequestResult +{ + ScopedSpinLock lock(m_lock); + return m_result; +} + +void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request) +{ + // Sub-requests cannot be for the same device + ASSERT(&m_device != &sub_request->m_device); + ASSERT(sub_request->m_parent_request == nullptr); + sub_request->m_parent_request = this; + + bool should_start; + { + ScopedSpinLock lock(m_lock); + ASSERT(!is_completed_result(m_result)); + m_sub_requests_pending.append(sub_request); + should_start = (m_result == Started); + } + if (should_start) + sub_request->do_start(); +} + +void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request) +{ + bool all_completed; + { + ScopedSpinLock lock(m_lock); + ASSERT(m_result == Started); + size_t index; + for (index = 0; index < m_sub_requests_pending.size(); index++) { + if (&m_sub_requests_pending[index] == &sub_request) { + NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]); + m_sub_requests_pending.remove(index); + m_sub_requests_complete.append(move(request)); + break; + } + } + ASSERT(index < m_sub_requests_pending.size()); + all_completed = m_sub_requests_pending.is_empty(); + if (all_completed) { + // Aggregate any errors + bool any_failures = false; + bool any_memory_faults = false; + for (index = 0; index < m_sub_requests_complete.size(); index++) { + auto& sub_request = m_sub_requests_complete[index]; + auto sub_result = sub_request.get_request_result(); + ASSERT(is_completed_result(sub_result)); + switch (sub_result) { + case Failure: + any_failures = true; + break; + case MemoryFault: + any_memory_faults = true; + break; + default: + break; + } + if (any_failures && any_memory_faults) + break; // Stop checking if all error conditions were found + } + if (any_failures) + m_result = Failure; + else if (any_memory_faults) + m_result = MemoryFault; + else + m_result = Success; + } + } + if (all_completed) + request_finished(); +} + +void AsyncDeviceRequest::complete(RequestResult result) +{ + ASSERT(result == Success || result == Failure || result == MemoryFault); + ScopedCritical critical; + { + ScopedSpinLock lock(m_lock); + ASSERT(m_result == Started); + m_result = result; + } + if (Processor::current().in_irq()) { + ref(); // Make sure we don't get freed + Processor::deferred_call_queue([this]() { + request_finished(); + unref(); + }); + } else { + request_finished(); + } +} + +} diff --git a/Kernel/Devices/AsyncDeviceRequest.h b/Kernel/Devices/AsyncDeviceRequest.h new file mode 100644 index 0000000000..7d8010bd33 --- /dev/null +++ 
b/Kernel/Devices/AsyncDeviceRequest.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2020, The SerenityOS developers. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include <AK/NonnullRefPtrVector.h> +#include <Kernel/Process.h> +#include <Kernel/Thread.h> +#include <Kernel/UserOrKernelBuffer.h> +#include <Kernel/VM/ProcessPagingScope.h> +#include <Kernel/WaitQueue.h> + +namespace Kernel { + +class Device; + +class AsyncDeviceRequest : public RefCounted<AsyncDeviceRequest> { + AK_MAKE_NONCOPYABLE(AsyncDeviceRequest); + AK_MAKE_NONMOVABLE(AsyncDeviceRequest); + +public: + enum RequestResult { + Pending = 0, + Started, + Success, + Failure, + MemoryFault, + Cancelled + }; + + class RequestWaitResult { + friend class AsyncDeviceRequest; + + public: + RequestResult request_result() const { return m_request_result; } + Thread::BlockResult wait_result() const { return m_wait_result; } + + private: + RequestWaitResult(RequestResult request_result, Thread::BlockResult wait_result) + : m_request_result(request_result) + , m_wait_result(wait_result) + { + } + + RequestResult m_request_result; + Thread::BlockResult m_wait_result; + }; + + virtual ~AsyncDeviceRequest(); + + virtual const char* name() const = 0; + virtual void start() = 0; + + void add_sub_request(NonnullRefPtr<AsyncDeviceRequest>); + + [[nodiscard]] RequestWaitResult wait(timeval* = nullptr); + + void do_start(Badge<Device>) + { + do_start(); + } + + void complete(RequestResult result); + + void set_private(void* priv) + { + ASSERT(!m_private || !priv); + m_private = priv; + } + void* get_private() const { return m_private; } + + template<typename... Args> + [[nodiscard]] bool write_to_buffer(UserOrKernelBuffer& buffer, Args... args) + { + if (in_target_context(buffer)) + return buffer.write(forward<Args>(args)...); + ProcessPagingScope paging_scope(m_process); + return buffer.write(forward<Args>(args)...); + } + + template<size_t BUFFER_BYTES, typename... Args> + [[nodiscard]] bool write_to_buffer_buffered(UserOrKernelBuffer& buffer, Args... 
args) + { + if (in_target_context(buffer)) + return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...); + ProcessPagingScope paging_scope(m_process); + return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...); + } + + template<typename... Args> + [[nodiscard]] bool read_from_buffer(const UserOrKernelBuffer& buffer, Args... args) + { + if (in_target_context(buffer)) + return buffer.read(forward<Args>(args)...); + ProcessPagingScope paging_scope(m_process); + return buffer.read(forward<Args>(args)...); + } + + template<size_t BUFFER_BYTES, typename... Args> + [[nodiscard]] bool read_from_buffer_buffered(const UserOrKernelBuffer& buffer, Args... args) + { + if (in_target_context(buffer)) + return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...); + ProcessPagingScope paging_scope(m_process); + return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...); + } + +protected: + AsyncDeviceRequest(Device&); + + RequestResult get_request_result() const; + +private: + void sub_request_finished(AsyncDeviceRequest&); + void request_finished(); + + void do_start() + { + { + ScopedSpinLock lock(m_lock); + if (is_completed_result(m_result)) + return; + m_result = Started; + } + start(); + } + + bool in_target_context(const UserOrKernelBuffer& buffer) const + { + if (buffer.is_kernel_buffer()) + return true; + return m_process == Process::current(); + } + + static bool is_completed_result(RequestResult result) + { + return result > Started; + } + + Device& m_device; + + AsyncDeviceRequest* m_parent_request { nullptr }; + RequestResult m_result { Pending }; + NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_pending; + NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_complete; + WaitQueue m_queue; + NonnullRefPtr<Process> m_process; + void* m_private { nullptr }; + mutable SpinLock<u8> m_lock; +}; + +} diff --git a/Kernel/Devices/BXVGADevice.h b/Kernel/Devices/BXVGADevice.h index 6b98db525b..de3f4bfd72 100644 --- a/Kernel/Devices/BXVGADevice.h +++ b/Kernel/Devices/BXVGADevice.h @@ -48,10 +48,9 @@ private: virtual const char* class_name() const override { return "BXVGA"; } virtual bool can_read(const FileDescription&, size_t) const override { return true; } virtual bool can_write(const FileDescription&, size_t) const override { return true; } + virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); } virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; } virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; } - virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; } - virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; } void set_safe_resolution(); diff --git a/Kernel/Devices/BlockDevice.cpp b/Kernel/Devices/BlockDevice.cpp index d1f70a14dd..9e24f2dba8 100644 --- a/Kernel/Devices/BlockDevice.cpp +++ b/Kernel/Devices/BlockDevice.cpp @@ -28,18 +28,66 @@ namespace Kernel { +AsyncBlockDeviceRequest::AsyncBlockDeviceRequest(Device& block_device, RequestType request_type, u32 block_index, u32 block_count, const UserOrKernelBuffer& buffer, size_t buffer_size) + : AsyncDeviceRequest(block_device) + , m_block_device(static_cast<BlockDevice&>(block_device)) + , m_request_type(request_type) + , m_block_index(block_index) + , m_block_count(block_count) + , m_buffer(buffer) + , m_buffer_size(buffer_size) +{ +} + +void 
AsyncBlockDeviceRequest::start() +{ + m_block_device.start_request(*this); +} + BlockDevice::~BlockDevice() { } -bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer) const +bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer) { - return const_cast<BlockDevice*>(this)->read_blocks(index, 1, buffer); + auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, 1, buffer, 512); + switch (read_request->wait().request_result()) { + case AsyncDeviceRequest::Success: + return true; + case AsyncDeviceRequest::Failure: + dbg() << "BlockDevice::read_block(" << index << ") IO error"; + break; + case AsyncDeviceRequest::MemoryFault: + dbg() << "BlockDevice::read_block(" << index << ") EFAULT"; + break; + case AsyncDeviceRequest::Cancelled: + dbg() << "BlockDevice::read_block(" << index << ") cancelled"; + break; + default: + ASSERT_NOT_REACHED(); + } + return false; } -bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& data) +bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& buffer) { - return write_blocks(index, 1, data); + auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, 1, buffer, 512); + switch (write_request->wait().request_result()) { + case AsyncDeviceRequest::Success: + return true; + case AsyncDeviceRequest::Failure: + dbg() << "BlockDevice::write_block(" << index << ") IO error"; + break; + case AsyncDeviceRequest::MemoryFault: + dbg() << "BlockDevice::write_block(" << index << ") EFAULT"; + break; + case AsyncDeviceRequest::Cancelled: + dbg() << "BlockDevice::write_block(" << index << ") cancelled"; + break; + default: + ASSERT_NOT_REACHED(); + } + return false; } } diff --git a/Kernel/Devices/BlockDevice.h b/Kernel/Devices/BlockDevice.h index ee9a5bbead..975f46b7a8 100644 --- a/Kernel/Devices/BlockDevice.h +++ b/Kernel/Devices/BlockDevice.h @@ -30,6 +30,46 @@ namespace Kernel { +class BlockDevice; + +class AsyncBlockDeviceRequest : public AsyncDeviceRequest { +public: + enum RequestType { + Read, + Write + }; + AsyncBlockDeviceRequest(Device& block_device, RequestType request_type, + u32 block_index, u32 block_count, const UserOrKernelBuffer& buffer, size_t buffer_size); + + RequestType request_type() const { return m_request_type; } + u32 block_index() const { return m_block_index; } + u32 block_count() const { return m_block_count; } + UserOrKernelBuffer& buffer() { return m_buffer; } + const UserOrKernelBuffer& buffer() const { return m_buffer; } + size_t buffer_size() const { return m_buffer_size; } + + virtual void start() override; + virtual const char* name() const override + { + switch (m_request_type) { + case Read: + return "BlockDeviceRequest (read)"; + case Write: + return "BlockDeviceRequest (read)"; + default: + ASSERT_NOT_REACHED(); + } + } + +private: + BlockDevice& m_block_device; + const RequestType m_request_type; + const u32 m_block_index; + const u32 m_block_count; + UserOrKernelBuffer m_buffer; + const size_t m_buffer_size; +}; + class BlockDevice : public Device { public: virtual ~BlockDevice() override; @@ -37,11 +77,10 @@ public: size_t block_size() const { return m_block_size; } virtual bool is_seekable() const override { return true; } - bool read_block(unsigned index, UserOrKernelBuffer&) const; + bool read_block(unsigned index, UserOrKernelBuffer&); bool write_block(unsigned index, const UserOrKernelBuffer&); - virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) = 0; - 
virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) = 0; + virtual void start_request(AsyncBlockDeviceRequest&) = 0; protected: BlockDevice(unsigned major, unsigned minor, size_t block_size = PAGE_SIZE) diff --git a/Kernel/Devices/Device.cpp b/Kernel/Devices/Device.cpp index 934c587ba6..eda56e1cfe 100644 --- a/Kernel/Devices/Device.cpp +++ b/Kernel/Devices/Device.cpp @@ -80,4 +80,21 @@ String Device::absolute_path(const FileDescription&) const return absolute_path(); } +void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest& completed_request) +{ + AsyncDeviceRequest* next_request = nullptr; + + { + ScopedSpinLock lock(m_requests_lock); + ASSERT(!m_requests.is_empty()); + ASSERT(m_requests.first().ptr() == &completed_request); + m_requests.remove(m_requests.begin()); + if (!m_requests.is_empty()) + next_request = m_requests.first().ptr(); + } + + if (next_request) + next_request->start(); +} + } diff --git a/Kernel/Devices/Device.h b/Kernel/Devices/Device.h index 8fa97a02ab..80a63e22a8 100644 --- a/Kernel/Devices/Device.h +++ b/Kernel/Devices/Device.h @@ -34,10 +34,12 @@ // There are two main subclasses: // - BlockDevice (random access) // - CharacterDevice (sequential) +#include <AK/DoublyLinkedList.h> #include <AK/Function.h> #include <AK/HashMap.h> -#include <Kernel/Arch/i386/CPU.h> +#include <Kernel/Devices/AsyncDeviceRequest.h> #include <Kernel/FileSystem/File.h> +#include <Kernel/Lock.h> #include <Kernel/UnixTypes.h> namespace Kernel { @@ -61,6 +63,23 @@ public: static void for_each(Function<void(Device&)>); static Device* get_device(unsigned major, unsigned minor); + void process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest&); + + template<typename AsyncRequestType, typename... Args> + NonnullRefPtr<AsyncRequestType> make_request(Args&&... 
args) + { + auto request = adopt(*new AsyncRequestType(*this, forward<Args>(args)...)); + bool was_empty; + { + ScopedSpinLock lock(m_requests_lock); + was_empty = m_requests.is_empty(); + m_requests.append(request); + } + if (was_empty) + request->do_start({}); + return request; + } + protected: Device(unsigned major, unsigned minor); void set_uid(uid_t uid) { m_uid = uid; } @@ -73,6 +92,9 @@ private: unsigned m_minor { 0 }; uid_t m_uid { 0 }; gid_t m_gid { 0 }; + + SpinLock<u8> m_requests_lock; + DoublyLinkedList<RefPtr<AsyncDeviceRequest>> m_requests; }; } diff --git a/Kernel/Devices/DiskPartition.cpp b/Kernel/Devices/DiskPartition.cpp index d1ccb90d1b..d07961f74c 100644 --- a/Kernel/Devices/DiskPartition.cpp +++ b/Kernel/Devices/DiskPartition.cpp @@ -48,6 +48,12 @@ DiskPartition::~DiskPartition() { } +void DiskPartition::start_request(AsyncBlockDeviceRequest& request) +{ + request.add_sub_request(m_device->make_request<AsyncBlockDeviceRequest>(request.request_type(), + request.block_index() + m_block_offset, request.block_count(), request.buffer(), request.buffer_size())); +} + KResultOr<size_t> DiskPartition::read(FileDescription& fd, size_t offset, UserOrKernelBuffer& outbuf, size_t len) { unsigned adjust = m_block_offset * block_size(); @@ -92,24 +98,6 @@ bool DiskPartition::can_write(const FileDescription& fd, size_t offset) const return m_device->can_write(fd, offset + adjust); } -bool DiskPartition::read_blocks(unsigned index, u16 count, UserOrKernelBuffer& out) -{ -#ifdef OFFD_DEBUG - klog() << "DiskPartition::read_blocks " << index << " (really: " << (m_block_offset + index) << ") count=" << count; -#endif - - return m_device->read_blocks(m_block_offset + index, count, out); -} - -bool DiskPartition::write_blocks(unsigned index, u16 count, const UserOrKernelBuffer& data) -{ -#ifdef OFFD_DEBUG - klog() << "DiskPartition::write_blocks " << index << " (really: " << (m_block_offset + index) << ") count=" << count; -#endif - - return m_device->write_blocks(m_block_offset + index, count, data); -} - const char* DiskPartition::class_name() const { return "DiskPartition"; diff --git a/Kernel/Devices/DiskPartition.h b/Kernel/Devices/DiskPartition.h index e9551b8097..7d37354960 100644 --- a/Kernel/Devices/DiskPartition.h +++ b/Kernel/Devices/DiskPartition.h @@ -36,8 +36,7 @@ public: static NonnullRefPtr<DiskPartition> create(BlockDevice&, unsigned block_offset, unsigned block_limit); virtual ~DiskPartition(); - virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) override; - virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) override; + virtual void start_request(AsyncBlockDeviceRequest&) override; // ^BlockDevice virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override; diff --git a/Kernel/Devices/EBRPartitionTable.cpp b/Kernel/Devices/EBRPartitionTable.cpp index bfaf999162..6cdd52fb8a 100644 --- a/Kernel/Devices/EBRPartitionTable.cpp +++ b/Kernel/Devices/EBRPartitionTable.cpp @@ -63,6 +63,9 @@ int EBRPartitionTable::index_of_ebr_container() const bool EBRPartitionTable::initialize() { + auto mbr_header_request = m_device->make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, + 0, 1, UserOrKernelBuffer::for_kernel_buffer(m_cached_mbr_header), sizeof(m_cached_mbr_header)); + auto mbr_header_buffer = UserOrKernelBuffer::for_kernel_buffer(m_cached_mbr_header); if (!m_device->read_block(0, mbr_header_buffer)) { return false; diff --git a/Kernel/Devices/GPTPartitionTable.cpp 
b/Kernel/Devices/GPTPartitionTable.cpp index 9031269031..019f8994e1 100644 --- a/Kernel/Devices/GPTPartitionTable.cpp +++ b/Kernel/Devices/GPTPartitionTable.cpp @@ -84,7 +84,7 @@ RefPtr<DiskPartition> GPTPartitionTable::partition(unsigned index) GPTPartitionEntry entries[entries_per_sector]; auto entries_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&entries); - this->m_device->read_blocks(lba, 1, entries_buffer); + this->m_device->read_block(lba, entries_buffer); GPTPartitionEntry& entry = entries[((index - 1) % entries_per_sector)]; #ifdef GPT_DEBUG diff --git a/Kernel/Devices/MBVGADevice.h b/Kernel/Devices/MBVGADevice.h index 09dda1f853..9ee9d762ae 100644 --- a/Kernel/Devices/MBVGADevice.h +++ b/Kernel/Devices/MBVGADevice.h @@ -49,8 +49,7 @@ private: virtual bool can_write(const FileDescription&, size_t) const override { return true; } virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; } virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; } - virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; } - virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; } + virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); } size_t framebuffer_size_in_bytes() const { return m_framebuffer_pitch * m_framebuffer_height; } diff --git a/Kernel/Devices/PATAChannel.cpp b/Kernel/Devices/PATAChannel.cpp index cc47ff532c..fc3c8057ba 100644 --- a/Kernel/Devices/PATAChannel.cpp +++ b/Kernel/Devices/PATAChannel.cpp @@ -108,13 +108,6 @@ namespace Kernel { #define PCI_Mass_Storage_Class 0x1 #define PCI_IDE_Controller_Subclass 0x1 -static AK::Singleton<Lock> s_pata_lock; - -static Lock& s_lock() -{ - return *s_pata_lock; -}; - OwnPtr<PATAChannel> PATAChannel::create(ChannelType type, bool force_pio) { PCI::Address pci_address; @@ -141,17 +134,71 @@ PATAChannel::PATAChannel(PCI::Address address, ChannelType type, bool force_pio) initialize(force_pio); detect_disks(); - disable_irq(); + enable_irq(); } PATAChannel::~PATAChannel() { } -void PATAChannel::prepare_for_irq() +void PATAChannel::start_request(AsyncBlockDeviceRequest& request, bool use_dma, bool is_slave) { - cli(); - enable_irq(); + ScopedSpinLock lock(m_request_lock); +#ifdef PATA_DEBUG + dbg() << "PATAChannel::start_request"; +#endif + m_current_request = &request; + m_current_request_block_index = 0; + m_current_request_uses_dma = use_dma; + m_current_request_flushing_cache = false; + + if (request.request_type() == AsyncBlockDeviceRequest::Read) { + if (use_dma) + ata_read_sectors_with_dma(is_slave); + else + ata_read_sectors(is_slave); + } else { + if (use_dma) + ata_write_sectors_with_dma(is_slave); + else + ata_write_sectors(is_slave); + } +} + +void PATAChannel::complete_current_request(AsyncDeviceRequest::RequestResult result) +{ + // NOTE: this may be called from the interrupt handler! + ASSERT(m_current_request); + ASSERT(m_request_lock.is_locked()); + + // Now schedule reading back the buffer as soon as we leave the irq handler. + // This is important so that we can safely write the buffer back, + // which could cause page faults. Note that this may be called immediately + // before Processor::deferred_call_queue returns! 
+ Processor::deferred_call_queue([this, result]() { +#ifdef PATA_DEBUG + dbg() << "PATAChannel::complete_current_request result: " << result; +#endif + ASSERT(m_current_request); + auto& request = *m_current_request; + m_current_request = nullptr; + + if (m_current_request_uses_dma) { + if (result == AsyncDeviceRequest::Success) { + if (request.request_type() == AsyncBlockDeviceRequest::Read) { + if (!request.write_to_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) { + request.complete(AsyncDeviceRequest::MemoryFault); + return; + } + } + + // I read somewhere that this may trigger a cache flush so let's do it. + m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); + } + } + + request.complete(result); + }); } void PATAChannel::initialize(bool force_pio) @@ -175,12 +222,6 @@ static void print_ide_status(u8 status) klog() << "PATAChannel: print_ide_status: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0) << " DSC=" << ((status & ATA_SR_DSC) != 0) << " DF=" << ((status & ATA_SR_DF) != 0) << " CORR=" << ((status & ATA_SR_CORR) != 0) << " IDX=" << ((status & ATA_SR_IDX) != 0) << " ERR=" << ((status & ATA_SR_ERR) != 0); } -void PATAChannel::wait_for_irq() -{ - Thread::current()->wait_on(m_irq_queue, "PATAChannel"); - disable_irq(); -} - void PATAChannel::handle_irq(const RegisterState&) { u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); @@ -196,17 +237,66 @@ void PATAChannel::handle_irq(const RegisterState&) return; } + ScopedSpinLock lock(m_request_lock); +#ifdef PATA_DEBUG + klog() << "PATAChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0); +#endif + + if (!m_current_request) { +#ifdef PATA_DEBUG + dbg() << "PATAChannel: IRQ but no pending request!"; +#endif + return; + } + + bool received_all_irqs = m_current_request_uses_dma || m_current_request_block_index + 1 >= m_current_request->block_count(); + if (status & ATA_SR_ERR) { print_ide_status(status); m_device_error = m_io_base.offset(ATA_REG_ERROR).in<u8>(); klog() << "PATAChannel: Error " << String::format("%b", m_device_error) << "!"; + complete_current_request(AsyncDeviceRequest::Failure); + return; + } + + m_device_error = 0; + if (received_all_irqs) { + complete_current_request(AsyncDeviceRequest::Success); } else { - m_device_error = 0; + ASSERT(!m_current_request_uses_dma); + + // Now schedule reading/writing the buffer as soon as we leave the irq handler. 
+ // This is important so that we can safely access the buffers, which could + // trigger page faults + Processor::deferred_call_queue([this]() { + if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) { + dbg() << "PATAChannel: Read block " << m_current_request_block_index << "/" << m_current_request->block_count(); + if (ata_do_read_sector()) { + if (++m_current_request_block_index >= m_current_request->block_count()) { + complete_current_request(AsyncDeviceRequest::Success); + return; + } + // Wait for the next block + enable_irq(); + } + } else { + if (!m_current_request_flushing_cache) { + dbg() << "PATAChannel: Wrote block " << m_current_request_block_index << "/" << m_current_request->block_count(); + if (++m_current_request_block_index >= m_current_request->block_count()) { + // We read the last block, flush cache + ASSERT(!m_current_request_flushing_cache); + m_current_request_flushing_cache = true; + m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH); + } else { + // Read next block + ata_do_write_sector(); + } + } else { + complete_current_request(AsyncDeviceRequest::Success); + } + } + }); } -#ifdef PATA_DEBUG - klog() << "PATAChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0); -#endif - m_irq_queue.wake_all(); } static void io_delay() @@ -274,15 +364,16 @@ void PATAChannel::detect_disks() } } -bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer& outbuf, bool slave_request) +void PATAChannel::ata_read_sectors_with_dma(bool slave_request) { - LOCKER(s_lock()); + auto& request = *m_current_request; + u32 lba = request.block_index(); #ifdef PATA_DEBUG - dbg() << "PATAChannel::ata_read_sectors_with_dma (" << lba << " x" << count << ") -> " << outbuf.user_or_kernel_ptr(); + dbg() << "PATAChannel::ata_read_sectors_with_dma (" << lba << " x" << request.block_count() << ")"; #endif prdt().offset = m_dma_buffer_page->paddr(); - prdt().size = 512 * count; + prdt().size = 512 * request.block_count(); ASSERT(prdt().size <= PAGE_SIZE); @@ -312,7 +403,7 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuff m_io_base.offset(ATA_REG_LBA1).out<u8>(0); m_io_base.offset(ATA_REG_LBA2).out<u8>(0); - m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count); + m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count()); m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0); m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8); m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16); @@ -326,62 +417,60 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuff m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_DMA_EXT); io_delay(); - prepare_for_irq(); + enable_irq(); // Start bus master m_bus_master_base.out<u8>(0x9); +} - wait_for_irq(); - - if (m_device_error) +bool PATAChannel::ata_do_read_sector() +{ + auto& request = *m_current_request; + auto out_buffer = request.buffer().offset(m_current_request_block_index * 512); + ssize_t nwritten = request.write_to_buffer_buffered<512>(out_buffer, 512, [&](u8* buffer, size_t buffer_bytes) { + for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) + *(u16*)&buffer[i] = IO::in16(m_io_base.offset(ATA_REG_DATA).get()); + return (ssize_t)buffer_bytes; + }); + if (nwritten < 0) { + // TODO: Do we need to abort the PATA read if this wasn't the last block? 
+ complete_current_request(AsyncDeviceRequest::MemoryFault); return false; - - if (!outbuf.write(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count)) - return false; // TODO: -EFAULT - - // I read somewhere that this may trigger a cache flush so let's do it. - m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); + } return true; } -bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer& inbuf, bool slave_request) +void PATAChannel::ata_read_sectors(bool slave_request) { - LOCKER(s_lock()); + auto& request = *m_current_request; + ASSERT(request.block_count() <= 256); #ifdef PATA_DEBUG - dbg() << "PATAChannel::ata_write_sectors_with_dma (" << lba << " x" << count << ") <- " << inbuf.user_or_kernel_ptr(); + dbg() << "PATAChannel::ata_read_sectors"; #endif - prdt().offset = m_dma_buffer_page->paddr(); - prdt().size = 512 * count; - - if (!inbuf.read(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count)) - return false; // TODO: -EFAULT - - ASSERT(prdt().size <= PAGE_SIZE); - - // Stop bus master - m_bus_master_base.out<u8>(0); - - // Write the PRDT location - m_bus_master_base.offset(4).out<u32>(m_prdt_page->paddr().get()); - - // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware. - m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); - while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) ; + auto lba = request.block_index(); +#ifdef PATA_DEBUG + klog() << "PATAChannel: Reading " << request.block_count() << " sector(s) @ LBA " << lba; +#endif + + u8 devsel = 0xe0; + if (slave_request) + devsel |= 0x10; + m_control_base.offset(ATA_CTL_CONTROL).out<u8>(0); - m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(0x40 | (static_cast<u8>(slave_request) << 4)); + m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(devsel | (static_cast<u8>(slave_request) << 4) | 0x40); io_delay(); - m_io_base.offset(ATA_REG_FEATURES).out<u16>(0); + m_io_base.offset(ATA_REG_FEATURES).out<u8>(0); m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(0); m_io_base.offset(ATA_REG_LBA0).out<u8>(0); m_io_base.offset(ATA_REG_LBA1).out<u8>(0); m_io_base.offset(ATA_REG_LBA2).out<u8>(0); - m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count); + m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count()); m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0); m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8); m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16); @@ -392,53 +481,52 @@ bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const UserOrKer break; } - m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_DMA_EXT); - io_delay(); - - prepare_for_irq(); - // Start bus master - m_bus_master_base.out<u8>(0x1); - wait_for_irq(); - - if (m_device_error) - return false; - - // I read somewhere that this may trigger a cache flush so let's do it. 
- m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); - return true; + enable_irq(); + m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_PIO); } -bool PATAChannel::ata_read_sectors(u32 lba, u16 count, UserOrKernelBuffer& outbuf, bool slave_request) +void PATAChannel::ata_write_sectors_with_dma(bool slave_request) { - ASSERT(count <= 256); - LOCKER(s_lock()); + auto& request = *m_current_request; + u32 lba = request.block_index(); #ifdef PATA_DEBUG - dbg() << "PATAChannel::ata_read_sectors request (" << count << " sector(s) @ " << lba << " into " << outbuf.user_or_kernel_ptr() << ")"; + dbg() << "PATAChannel::ata_write_sectors_with_dma (" << lba << " x" << request.block_count() << ")"; #endif - while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) - ; + prdt().offset = m_dma_buffer_page->paddr(); + prdt().size = 512 * request.block_count(); -#ifdef PATA_DEBUG - klog() << "PATAChannel: Reading " << count << " sector(s) @ LBA " << lba; -#endif + if (!request.read_from_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) { + complete_current_request(AsyncDeviceRequest::MemoryFault); + return; + } - u8 devsel = 0xe0; - if (slave_request) - devsel |= 0x10; + ASSERT(prdt().size <= PAGE_SIZE); + + // Stop bus master + m_bus_master_base.out<u8>(0); + + // Write the PRDT location + m_bus_master_base.offset(4).out<u32>(m_prdt_page->paddr().get()); + + // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware. + m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); + + while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) + ; m_control_base.offset(ATA_CTL_CONTROL).out<u8>(0); - m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(devsel | (static_cast<u8>(slave_request) << 4) | 0x40); + m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(0x40 | (static_cast<u8>(slave_request) << 4)); io_delay(); - m_io_base.offset(ATA_REG_FEATURES).out<u8>(0); + m_io_base.offset(ATA_REG_FEATURES).out<u16>(0); m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(0); m_io_base.offset(ATA_REG_LBA0).out<u8>(0); m_io_base.offset(ATA_REG_LBA1).out<u8>(0); m_io_base.offset(ATA_REG_LBA2).out<u8>(0); - m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count); + m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count()); m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0); m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8); m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16); @@ -449,46 +537,45 @@ bool PATAChannel::ata_read_sectors(u32 lba, u16 count, UserOrKernelBuffer& outbu break; } - prepare_for_irq(); - m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_PIO); + m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_DMA_EXT); + io_delay(); - for (int i = 0; i < count; i++) { - if (i > 0) - prepare_for_irq(); - wait_for_irq(); - if (m_device_error) - return false; + enable_irq(); + // Start bus master + m_bus_master_base.out<u8>(0x1); +} - u8 status = m_control_base.offset(ATA_CTL_ALTSTATUS).in<u8>(); - ASSERT(!(status & ATA_SR_BSY)); +void PATAChannel::ata_do_write_sector() +{ + auto& request = *m_current_request; - auto out = outbuf.offset(i * 512); -#ifdef PATA_DEBUG - dbg() << "PATAChannel: Retrieving 512 bytes (part " << i << ") (status=" << String::format("%b", status) << "), outbuf=(" << out.user_or_kernel_ptr() << ")..."; -#endif - prepare_for_irq(); + io_delay(); + while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || 
!(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ)) + ; - ssize_t nwritten = out.write_buffered<512>(512, [&](u8* buffer, size_t buffer_bytes) { - for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) - *(u16*)&buffer[i] = IO::in16(m_io_base.offset(ATA_REG_DATA).get()); - return (ssize_t)buffer_bytes; - }); - if (nwritten < 0) { - sti(); - disable_irq(); - return false; // TODO: -EFAULT - } - } + u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); + ASSERT(status & ATA_SR_DRQ); - sti(); - disable_irq(); - return true; + auto in_buffer = request.buffer().offset(m_current_request_block_index * 512); +#ifdef PATA_DEBUG + dbg() << "PATAChannel: Writing 512 bytes (part " << m_current_request_block_index << ") (status=" << String::format("%b", status) << ")..."; +#endif + ssize_t nread = request.read_from_buffer_buffered<512>(in_buffer, 512, [&](const u8* buffer, size_t buffer_bytes) { + for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) + IO::out16(m_io_base.offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]); + return (ssize_t)buffer_bytes; + }); + if (nread < 0) + complete_current_request(AsyncDeviceRequest::MemoryFault); } -bool PATAChannel::ata_write_sectors(u32 start_sector, u16 count, const UserOrKernelBuffer& inbuf, bool slave_request) +void PATAChannel::ata_write_sectors(bool slave_request) { - ASSERT(count <= 256); - LOCKER(s_lock()); + auto& request = *m_current_request; + + ASSERT(request.block_count() <= 256); + u32 start_sector = request.block_index(); + u32 count = request.block_count(); #ifdef PATA_DEBUG klog() << "PATAChannel::ata_write_sectors request (" << count << " sector(s) @ " << start_sector << ")"; #endif @@ -516,37 +603,12 @@ bool PATAChannel::ata_write_sectors(u32 start_sector, u16 count, const UserOrKer m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_PIO); - for (int i = 0; i < count; i++) { - io_delay(); - while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ)) - ; - - u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); - ASSERT(status & ATA_SR_DRQ); - - auto in = inbuf.offset(i * 512); -#ifdef PATA_DEBUG - dbg() << "PATAChannel: Writing 512 bytes (part " << i << ") (status=" << String::format("%b", status) << "), inbuf=(" << in.user_or_kernel_ptr() << ")..."; -#endif - prepare_for_irq(); - ssize_t nread = in.read_buffered<512>(512, [&](const u8* buffer, size_t buffer_bytes) { - for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) - IO::out16(m_io_base.offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]); - return (ssize_t)buffer_bytes; - }); - wait_for_irq(); - status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); - ASSERT(!(status & ATA_SR_BSY)); - if (nread < 0) - return false; // TODO: -EFAULT - } - prepare_for_irq(); - m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH); - wait_for_irq(); - u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); - ASSERT(!(status & ATA_SR_BSY)); + io_delay(); + while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ)) + ; - return !m_device_error; + enable_irq(); + ata_do_write_sector(); } } diff --git a/Kernel/Devices/PATAChannel.h b/Kernel/Devices/PATAChannel.h index 40e331a4e8..e696abfe44 100644 --- a/Kernel/Devices/PATAChannel.h +++ b/Kernel/Devices/PATAChannel.h @@ -39,6 +39,7 @@ #include <AK/OwnPtr.h> #include <AK/RefPtr.h> +#include <Kernel/Devices/Device.h> #include <Kernel/IO.h> #include <Kernel/Lock.h> #include <Kernel/PCI/Access.h> @@ -50,6 +51,8 
@@ namespace Kernel { +class AsyncBlockDeviceRequest; + struct PhysicalRegionDescriptor { PhysicalAddress offset; u16 size { 0 }; @@ -83,13 +86,15 @@ private: void initialize(bool force_pio); void detect_disks(); - void wait_for_irq(); - bool ata_read_sectors_with_dma(u32, u16, UserOrKernelBuffer&, bool); - bool ata_write_sectors_with_dma(u32, u16, const UserOrKernelBuffer&, bool); - bool ata_read_sectors(u32, u16, UserOrKernelBuffer&, bool); - bool ata_write_sectors(u32, u16, const UserOrKernelBuffer&, bool); + void start_request(AsyncBlockDeviceRequest&, bool, bool); + void complete_current_request(AsyncDeviceRequest::RequestResult); - inline void prepare_for_irq(); + void ata_read_sectors_with_dma(bool); + void ata_read_sectors(bool); + bool ata_do_read_sector(); + void ata_write_sectors_with_dma(bool); + void ata_write_sectors(bool); + void ata_do_write_sector(); // Data members u8 m_channel_number { 0 }; // Channel number. 0 = master, 1 = slave @@ -97,8 +102,6 @@ private: IOAddress m_control_base; volatile u8 m_device_error { 0 }; - WaitQueue m_irq_queue; - PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_page->paddr().offset(0xc0000000).as_ptr()); } RefPtr<PhysicalPage> m_prdt_page; RefPtr<PhysicalPage> m_dma_buffer_page; @@ -108,5 +111,11 @@ private: RefPtr<PATADiskDevice> m_master; RefPtr<PATADiskDevice> m_slave; + + AsyncBlockDeviceRequest* m_current_request { nullptr }; + u32 m_current_request_block_index { 0 }; + bool m_current_request_uses_dma { false }; + bool m_current_request_flushing_cache { false }; + SpinLock<u8> m_request_lock; }; } diff --git a/Kernel/Devices/PATADiskDevice.cpp b/Kernel/Devices/PATADiskDevice.cpp index a81f7bb557..8b85783a5b 100644 --- a/Kernel/Devices/PATADiskDevice.cpp +++ b/Kernel/Devices/PATADiskDevice.cpp @@ -55,22 +55,10 @@ const char* PATADiskDevice::class_name() const return "PATADiskDevice"; } -bool PATADiskDevice::read_blocks(unsigned index, u16 count, UserOrKernelBuffer& out) +void PATADiskDevice::start_request(AsyncBlockDeviceRequest& request) { - if (!m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource()) - return read_sectors_with_dma(index, count, out); - return read_sectors(index, count, out); -} - -bool PATADiskDevice::write_blocks(unsigned index, u16 count, const UserOrKernelBuffer& data) -{ - if (!m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource()) - return write_sectors_with_dma(index, count, data); - for (unsigned i = 0; i < count; ++i) { - if (!write_sectors(index + i, 1, data.offset(i * 512))) - return false; - } - return true; + bool use_dma = !m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource(); + m_channel.start_request(request, use_dma, is_slave()); } void PATADiskDevice::set_drive_geometry(u16 cyls, u16 heads, u16 spt) @@ -100,8 +88,19 @@ KResultOr<size_t> PATADiskDevice::read(FileDescription&, size_t offset, UserOrKe #endif if (whole_blocks > 0) { - if (!read_blocks(index, whole_blocks, outbuf)) - return -1; + auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, whole_blocks, outbuf, whole_blocks * block_size()); + auto result = read_request->wait(); + if (result.wait_result().was_interrupted()) + return KResult(-EINTR); + switch (result.request_result()) { + case AsyncDeviceRequest::Failure: + case AsyncDeviceRequest::Cancelled: + return KResult(-EIO); + case AsyncDeviceRequest::MemoryFault: + return KResult(-EFAULT); + default: + break; + } } off_t pos = whole_blocks 
* block_size(); @@ -109,8 +108,21 @@ KResultOr<size_t> PATADiskDevice::read(FileDescription&, size_t offset, UserOrKe if (remaining > 0) { auto data = ByteBuffer::create_uninitialized(block_size()); auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data()); - if (!read_blocks(index + whole_blocks, 1, data_buffer)) + auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size()); + auto result = read_request->wait(); + if (result.wait_result().was_interrupted()) + return KResult(-EINTR); + switch (result.request_result()) { + case AsyncDeviceRequest::Failure: return pos; + case AsyncDeviceRequest::Cancelled: + return KResult(-EIO); + case AsyncDeviceRequest::MemoryFault: + // This should never happen, we're writing to a kernel buffer! + ASSERT_NOT_REACHED(); + default: + break; + } if (!outbuf.write(data.data(), pos, remaining)) return KResult(-EFAULT); } @@ -143,8 +155,19 @@ KResultOr<size_t> PATADiskDevice::write(FileDescription&, size_t offset, const U #endif if (whole_blocks > 0) { - if (!write_blocks(index, whole_blocks, inbuf)) - return -1; + auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, whole_blocks, inbuf, whole_blocks * block_size()); + auto result = write_request->wait(); + if (result.wait_result().was_interrupted()) + return KResult(-EINTR); + switch (result.request_result()) { + case AsyncDeviceRequest::Failure: + case AsyncDeviceRequest::Cancelled: + return KResult(-EIO); + case AsyncDeviceRequest::MemoryFault: + return KResult(-EFAULT); + default: + break; + } } off_t pos = whole_blocks * block_size(); @@ -155,12 +178,45 @@ KResultOr<size_t> PATADiskDevice::write(FileDescription&, size_t offset, const U if (remaining > 0) { auto data = ByteBuffer::create_zeroed(block_size()); auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data()); - if (!read_blocks(index + whole_blocks, 1, data_buffer)) - return pos; + + { + auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size()); + auto result = read_request->wait(); + if (result.wait_result().was_interrupted()) + return KResult(-EINTR); + switch (result.request_result()) { + case AsyncDeviceRequest::Failure: + return pos; + case AsyncDeviceRequest::Cancelled: + return KResult(-EIO); + case AsyncDeviceRequest::MemoryFault: + // This should never happen, we're writing to a kernel buffer! + ASSERT_NOT_REACHED(); + default: + break; + } + } + if (!inbuf.read(data.data(), pos, remaining)) return KResult(-EFAULT); - if (!write_blocks(index + whole_blocks, 1, data_buffer)) - return pos; + + { + auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index + whole_blocks, 1, data_buffer, block_size()); + auto result = write_request->wait(); + if (result.wait_result().was_interrupted()) + return KResult(-EINTR); + switch (result.request_result()) { + case AsyncDeviceRequest::Failure: + return pos; + case AsyncDeviceRequest::Cancelled: + return KResult(-EIO); + case AsyncDeviceRequest::MemoryFault: + // This should never happen, we're writing to a kernel buffer! 
+ ASSERT_NOT_REACHED(); + default: + break; + } + } } return pos + remaining; @@ -171,26 +227,6 @@ bool PATADiskDevice::can_write(const FileDescription&, size_t offset) const return offset < (m_cylinders * m_heads * m_sectors_per_track * block_size()); } -bool PATADiskDevice::read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer& outbuf) -{ - return m_channel.ata_read_sectors_with_dma(lba, count, outbuf, is_slave()); -} - -bool PATADiskDevice::read_sectors(u32 start_sector, u16 count, UserOrKernelBuffer& outbuf) -{ - return m_channel.ata_read_sectors(start_sector, count, outbuf, is_slave()); -} - -bool PATADiskDevice::write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer& inbuf) -{ - return m_channel.ata_write_sectors_with_dma(lba, count, inbuf, is_slave()); -} - -bool PATADiskDevice::write_sectors(u32 start_sector, u16 count, const UserOrKernelBuffer& inbuf) -{ - return m_channel.ata_write_sectors(start_sector, count, inbuf, is_slave()); -} - bool PATADiskDevice::is_slave() const { return m_drive_type == DriveType::Slave; diff --git a/Kernel/Devices/PATADiskDevice.h b/Kernel/Devices/PATADiskDevice.h index eb9377e1bf..1036b06fd5 100644 --- a/Kernel/Devices/PATADiskDevice.h +++ b/Kernel/Devices/PATADiskDevice.h @@ -54,13 +54,10 @@ public: static NonnullRefPtr<PATADiskDevice> create(PATAChannel&, DriveType, int major, int minor); virtual ~PATADiskDevice() override; - // ^DiskDevice - virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) override; - virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) override; - void set_drive_geometry(u16, u16, u16); // ^BlockDevice + virtual void start_request(AsyncBlockDeviceRequest&) override; virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override; virtual bool can_read(const FileDescription&, size_t) const override; virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override; @@ -73,11 +70,6 @@ private: // ^DiskDevice virtual const char* class_name() const override; - bool wait_for_irq(); - bool read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer&); - bool write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer&); - bool read_sectors(u32 lba, u16 count, UserOrKernelBuffer& buffer); - bool write_sectors(u32 lba, u16 count, const UserOrKernelBuffer& data); bool is_slave() const; Lock m_lock { "IDEDiskDevice" }; |
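Finally, the detail the commit message calls out: since an IRQ can complete a request while an unrelated process is scheduled, the buffer helpers switch paging contexts before touching user memory. Condensed from `AsyncDeviceRequest.h` above, with explanatory comments added:

```cpp
// The request captures its originating process (m_process) at
// construction time. By the time a deferred call copies data, another
// process's page tables may be active, so user-space pointers would
// otherwise resolve through the wrong address space.
template<typename... Args>
[[nodiscard]] bool write_to_buffer(UserOrKernelBuffer& buffer, Args... args)
{
    // Kernel buffers are mapped in every address space, and if the
    // issuing process is still current, no switch is needed.
    if (in_target_context(buffer))
        return buffer.write(forward<Args>(args)...);
    // Otherwise, borrow the issuing process's page tables for the copy;
    // ProcessPagingScope restores the previous context on destruction.
    ProcessPagingScope paging_scope(m_process);
    return buffer.write(forward<Args>(args)...);
}
```

`read_from_buffer()` and the `*_buffered` variants follow the same pattern, which is how the reworked PATA code can safely copy sector data to and from user buffers inside deferred calls.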