author | Andreas Kling <kling@serenityos.org> | 2020-11-04 21:25:26 +0100
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2020-11-04 21:25:26 +0100
commit | 501cef2bd70a22e8eb9d0ee81c5931d0561e47e7 (patch) |
tree | a960361553967f59b2e0af1822d216567fc66d88 |
parent | 70eaadc1cd48ae3f7e6b769d554e1ae04ffed5a3 (diff) |
download | serenity-501cef2bd70a22e8eb9d0ee81c5931d0561e47e7.zip |
Revert "Kernel: Implement an asynchronous device request stack"
This reverts commit 2fd5ce1eb06e5cbbb180cba64a567e99f0cd846c.
This broke booting without SMP. (PR was #3921)
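For context, the patch being reverted (#3921) replaced the synchronous `read_blocks()`/`write_blocks()` virtuals with ref-counted `AsyncDeviceRequest` objects that are queued per device, may complete from interrupt context, can fan out into sub-requests (this is how `DiskPartition` forwarded work to the underlying disk), and are waited on via a `WaitQueue`. The sketch below is a rough, self-contained model of that pattern, not the real `Kernel/Devices/AsyncDeviceRequest` API: `std::mutex`/`std::condition_variable` stand in for the kernel's `SpinLock` and `WaitQueue`, and the class and member names are illustrative only.

```cpp
// Rough model of the asynchronous request pattern this commit reverts.
// std::mutex / std::condition_variable stand in for the kernel's SpinLock and
// WaitQueue; names only loosely mirror Kernel/Devices/AsyncDeviceRequest.h.
#include <condition_variable>
#include <memory>
#include <mutex>
#include <vector>

enum class RequestResult { Pending, Success, Failure };

class AsyncRequest {
public:
    virtual ~AsyncRequest() = default;

    // A driver subclass kicks off the actual hardware operation here and
    // later calls complete() when it is done.
    virtual void start() = 0;

    // A request may fan out into sub-requests on other devices; the parent
    // only completes once every sub-request has completed.
    void add_sub_request(std::shared_ptr<AsyncRequest> sub)
    {
        sub->m_parent = this;
        {
            std::lock_guard<std::mutex> lock(m_lock);
            m_subs.push_back(sub); // keep sub-requests alive until completion
        }
        sub->start();
    }

    // Called by the driver when the I/O finishes (in the kernel, possibly
    // from an interrupt handler).
    void complete(RequestResult result)
    {
        {
            std::lock_guard<std::mutex> lock(m_lock);
            m_result = result;
        }
        if (m_parent)
            m_parent->sub_request_finished(result);
        m_waiters.notify_all(); // wake anyone blocked in wait()
    }

    // Callers block here until the request (and all sub-requests) finish.
    RequestResult wait()
    {
        std::unique_lock<std::mutex> lock(m_lock);
        m_waiters.wait(lock, [&] { return m_result != RequestResult::Pending; });
        return m_result;
    }

private:
    void sub_request_finished(RequestResult sub_result)
    {
        bool all_done;
        {
            std::lock_guard<std::mutex> lock(m_lock);
            if (sub_result != RequestResult::Success)
                m_any_sub_failed = true;
            all_done = (++m_subs_finished == m_subs.size());
        }
        // Aggregate sub-request results only once the last one is in.
        if (all_done)
            complete(m_any_sub_failed ? RequestResult::Failure : RequestResult::Success);
    }

    AsyncRequest* m_parent { nullptr };
    RequestResult m_result { RequestResult::Pending };
    std::vector<std::shared_ptr<AsyncRequest>> m_subs;
    size_t m_subs_finished { 0 };
    bool m_any_sub_failed { false };
    std::mutex m_lock;
    std::condition_variable m_waiters;
};
```

In the kernel version visible in the diff below, `complete()` may run inside an IRQ handler, so the final bookkeeping is pushed through `Processor::deferred_call_queue()`, and the owning `Device` keeps a request queue so the next pending request starts as soon as the current one finishes.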
-rw-r--r-- | Kernel/CMakeLists.txt | 1
-rw-r--r-- | Kernel/Devices/AsyncDeviceRequest.cpp | 175
-rw-r--r-- | Kernel/Devices/AsyncDeviceRequest.h | 175
-rw-r--r-- | Kernel/Devices/BXVGADevice.h | 3
-rw-r--r-- | Kernel/Devices/BlockDevice.cpp | 56
-rw-r--r-- | Kernel/Devices/BlockDevice.h | 45
-rw-r--r-- | Kernel/Devices/Device.cpp | 17
-rw-r--r-- | Kernel/Devices/Device.h | 24
-rw-r--r-- | Kernel/Devices/DiskPartition.cpp | 24
-rw-r--r-- | Kernel/Devices/DiskPartition.h | 3
-rw-r--r-- | Kernel/Devices/EBRPartitionTable.cpp | 3
-rw-r--r-- | Kernel/Devices/GPTPartitionTable.cpp | 2
-rw-r--r-- | Kernel/Devices/MBVGADevice.h | 3
-rw-r--r-- | Kernel/Devices/PATAChannel.cpp | 353
-rw-r--r-- | Kernel/Devices/PATAChannel.h | 22
-rw-r--r-- | Kernel/Devices/PATADiskDevice.cpp | 124
-rw-r--r-- | Kernel/Devices/PATADiskDevice.h | 10
17 files changed, 241 insertions, 799 deletions
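What the revert restores is the older, fully synchronous shape of the block-device layer: `BlockDevice` exposes pure-virtual `read_blocks()`/`write_blocks()` that block until the transfer finishes, `read_block()`/`write_block()` are one-block conveniences, and `DiskPartition` simply shifts the block index by its start offset before forwarding. A minimal stand-alone sketch of that shape follows; the raw `uint8_t*` buffers and the `RamDisk` backing store are simplifications introduced here to keep it runnable (the kernel uses `UserOrKernelBuffer` and real drivers), not actual SerenityOS classes.

```cpp
// Minimal sketch of the synchronous interface the revert restores:
// read_blocks()/write_blocks() block until the transfer is done, and a
// partition is just an offset view onto its parent device.
#include <cstdint>
#include <cstring>
#include <vector>

static constexpr size_t block_size = 512;

class BlockDevice {
public:
    virtual ~BlockDevice() = default;
    virtual bool read_blocks(unsigned index, uint16_t count, uint8_t* out) = 0;
    virtual bool write_blocks(unsigned index, uint16_t count, const uint8_t* data) = 0;

    // Single-block conveniences, mirroring BlockDevice::read_block()/write_block().
    bool read_block(unsigned index, uint8_t* out) { return read_blocks(index, 1, out); }
    bool write_block(unsigned index, const uint8_t* data) { return write_blocks(index, 1, data); }
};

// Hypothetical in-memory device so the example is self-contained.
class RamDisk final : public BlockDevice {
public:
    explicit RamDisk(size_t block_count) : m_storage(block_count * block_size) {}

    bool read_blocks(unsigned index, uint16_t count, uint8_t* out) override
    {
        if ((index + count) * block_size > m_storage.size())
            return false;
        std::memcpy(out, m_storage.data() + index * block_size, count * block_size);
        return true;
    }
    bool write_blocks(unsigned index, uint16_t count, const uint8_t* data) override
    {
        if ((index + count) * block_size > m_storage.size())
            return false;
        std::memcpy(m_storage.data() + index * block_size, data, count * block_size);
        return true;
    }

private:
    std::vector<uint8_t> m_storage;
};

// A partition forwards every request to its parent device, shifted by its
// starting block -- the same shape as DiskPartition::read_blocks() in the diff.
class Partition final : public BlockDevice {
public:
    Partition(BlockDevice& device, unsigned block_offset)
        : m_device(device), m_block_offset(block_offset) {}

    bool read_blocks(unsigned index, uint16_t count, uint8_t* out) override
    {
        return m_device.read_blocks(m_block_offset + index, count, out);
    }
    bool write_blocks(unsigned index, uint16_t count, const uint8_t* data) override
    {
        return m_device.write_blocks(m_block_offset + index, count, data);
    }

private:
    BlockDevice& m_device;
    unsigned m_block_offset;
};
```

With this interface every caller blocks inside the driver until the PIO or DMA transfer finishes (in the restored code, `PATAChannel` waits on its IRQ queue under a global lock), which is exactly what the asynchronous request stack was trying to move away from.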
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt index 6112a857a9..e6c7997231 100644 --- a/Kernel/CMakeLists.txt +++ b/Kernel/CMakeLists.txt @@ -14,7 +14,6 @@ set(KERNEL_SOURCES CMOS.cpp CommandLine.cpp Console.cpp - Devices/AsyncDeviceRequest.cpp Devices/BXVGADevice.cpp Devices/BlockDevice.cpp Devices/CharacterDevice.cpp diff --git a/Kernel/Devices/AsyncDeviceRequest.cpp b/Kernel/Devices/AsyncDeviceRequest.cpp deleted file mode 100644 index a114584085..0000000000 --- a/Kernel/Devices/AsyncDeviceRequest.cpp +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2020, The SerenityOS developers. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <Kernel/Devices/AsyncDeviceRequest.h> -#include <Kernel/Devices/Device.h> - -namespace Kernel { - -AsyncDeviceRequest::AsyncDeviceRequest(Device& device) - : m_device(device) - , m_process(*Process::current()) -{ -} - -AsyncDeviceRequest::~AsyncDeviceRequest() -{ - { - ScopedSpinLock lock(m_lock); - ASSERT(is_completed_result(m_result)); - ASSERT(m_sub_requests_pending.is_empty()); - } - - // We should not need any locking here anymore. The destructor should - // only be called until either wait() or cancel() (once implemented) returned. - // At that point no sub-request should be adding more requests and all - // sub-requests should be completed (either succeeded, failed, or cancelled). - // Which means there should be no more pending sub-requests and the - // entire AsyncDeviceRequest hirarchy should be immutable. 
- for (auto& sub_request : m_sub_requests_complete) { - ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore - ASSERT(sub_request.m_parent_request == this); - sub_request.m_parent_request = nullptr; - } -} - -void AsyncDeviceRequest::request_finished() -{ - if (m_parent_request) - m_parent_request->sub_request_finished(*this); - - // Trigger processing the next request - m_device.process_next_queued_request({}, *this); - - // Wake anyone who may be waiting - m_queue.wake_all(); -} - -auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult -{ - ASSERT(!m_parent_request); - auto request_result = get_request_result(); - if (is_completed_result(request_result)) - return { request_result, Thread::BlockResult::NotBlocked }; - auto wait_result = Thread::current()->wait_on(m_queue, name(), timeout); - return { get_request_result(), wait_result }; -} - -auto AsyncDeviceRequest::get_request_result() const -> RequestResult -{ - ScopedSpinLock lock(m_lock); - return m_result; -} - -void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request) -{ - // Sub-requests cannot be for the same device - ASSERT(&m_device != &sub_request->m_device); - ASSERT(sub_request->m_parent_request == nullptr); - sub_request->m_parent_request = this; - - bool should_start; - { - ScopedSpinLock lock(m_lock); - ASSERT(!is_completed_result(m_result)); - m_sub_requests_pending.append(sub_request); - should_start = (m_result == Started); - } - if (should_start) - sub_request->do_start(); -} - -void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request) -{ - bool all_completed; - { - ScopedSpinLock lock(m_lock); - ASSERT(m_result == Started); - size_t index; - for (index = 0; index < m_sub_requests_pending.size(); index++) { - if (&m_sub_requests_pending[index] == &sub_request) { - NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]); - m_sub_requests_pending.remove(index); - m_sub_requests_complete.append(move(request)); - break; - } - } - ASSERT(index < m_sub_requests_pending.size()); - all_completed = m_sub_requests_pending.is_empty(); - if (all_completed) { - // Aggregate any errors - bool any_failures = false; - bool any_memory_faults = false; - for (index = 0; index < m_sub_requests_complete.size(); index++) { - auto& sub_request = m_sub_requests_complete[index]; - auto sub_result = sub_request.get_request_result(); - ASSERT(is_completed_result(sub_result)); - switch (sub_result) { - case Failure: - any_failures = true; - break; - case MemoryFault: - any_memory_faults = true; - break; - default: - break; - } - if (any_failures && any_memory_faults) - break; // Stop checking if all error conditions were found - } - if (any_failures) - m_result = Failure; - else if (any_memory_faults) - m_result = MemoryFault; - else - m_result = Success; - } - } - if (all_completed) - request_finished(); -} - -void AsyncDeviceRequest::complete(RequestResult result) -{ - ASSERT(result == Success || result == Failure || result == MemoryFault); - ScopedCritical critical; - { - ScopedSpinLock lock(m_lock); - ASSERT(m_result == Started); - m_result = result; - } - if (Processor::current().in_irq()) { - ref(); // Make sure we don't get freed - Processor::deferred_call_queue([this]() { - request_finished(); - unref(); - }); - } else { - request_finished(); - } -} - -} diff --git a/Kernel/Devices/AsyncDeviceRequest.h b/Kernel/Devices/AsyncDeviceRequest.h deleted file mode 100644 index 7d8010bd33..0000000000 --- 
a/Kernel/Devices/AsyncDeviceRequest.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2020, The SerenityOS developers. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#pragma once - -#include <AK/NonnullRefPtrVector.h> -#include <Kernel/Process.h> -#include <Kernel/Thread.h> -#include <Kernel/UserOrKernelBuffer.h> -#include <Kernel/VM/ProcessPagingScope.h> -#include <Kernel/WaitQueue.h> - -namespace Kernel { - -class Device; - -class AsyncDeviceRequest : public RefCounted<AsyncDeviceRequest> { - AK_MAKE_NONCOPYABLE(AsyncDeviceRequest); - AK_MAKE_NONMOVABLE(AsyncDeviceRequest); - -public: - enum RequestResult { - Pending = 0, - Started, - Success, - Failure, - MemoryFault, - Cancelled - }; - - class RequestWaitResult { - friend class AsyncDeviceRequest; - - public: - RequestResult request_result() const { return m_request_result; } - Thread::BlockResult wait_result() const { return m_wait_result; } - - private: - RequestWaitResult(RequestResult request_result, Thread::BlockResult wait_result) - : m_request_result(request_result) - , m_wait_result(wait_result) - { - } - - RequestResult m_request_result; - Thread::BlockResult m_wait_result; - }; - - virtual ~AsyncDeviceRequest(); - - virtual const char* name() const = 0; - virtual void start() = 0; - - void add_sub_request(NonnullRefPtr<AsyncDeviceRequest>); - - [[nodiscard]] RequestWaitResult wait(timeval* = nullptr); - - void do_start(Badge<Device>) - { - do_start(); - } - - void complete(RequestResult result); - - void set_private(void* priv) - { - ASSERT(!m_private || !priv); - m_private = priv; - } - void* get_private() const { return m_private; } - - template<typename... Args> - [[nodiscard]] bool write_to_buffer(UserOrKernelBuffer& buffer, Args... args) - { - if (in_target_context(buffer)) - return buffer.write(forward<Args>(args)...); - ProcessPagingScope paging_scope(m_process); - return buffer.write(forward<Args>(args)...); - } - - template<size_t BUFFER_BYTES, typename... Args> - [[nodiscard]] bool write_to_buffer_buffered(UserOrKernelBuffer& buffer, Args... 
args) - { - if (in_target_context(buffer)) - return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...); - ProcessPagingScope paging_scope(m_process); - return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...); - } - - template<typename... Args> - [[nodiscard]] bool read_from_buffer(const UserOrKernelBuffer& buffer, Args... args) - { - if (in_target_context(buffer)) - return buffer.read(forward<Args>(args)...); - ProcessPagingScope paging_scope(m_process); - return buffer.read(forward<Args>(args)...); - } - - template<size_t BUFFER_BYTES, typename... Args> - [[nodiscard]] bool read_from_buffer_buffered(const UserOrKernelBuffer& buffer, Args... args) - { - if (in_target_context(buffer)) - return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...); - ProcessPagingScope paging_scope(m_process); - return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...); - } - -protected: - AsyncDeviceRequest(Device&); - - RequestResult get_request_result() const; - -private: - void sub_request_finished(AsyncDeviceRequest&); - void request_finished(); - - void do_start() - { - { - ScopedSpinLock lock(m_lock); - if (is_completed_result(m_result)) - return; - m_result = Started; - } - start(); - } - - bool in_target_context(const UserOrKernelBuffer& buffer) const - { - if (buffer.is_kernel_buffer()) - return true; - return m_process == Process::current(); - } - - static bool is_completed_result(RequestResult result) - { - return result > Started; - } - - Device& m_device; - - AsyncDeviceRequest* m_parent_request { nullptr }; - RequestResult m_result { Pending }; - NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_pending; - NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_complete; - WaitQueue m_queue; - NonnullRefPtr<Process> m_process; - void* m_private { nullptr }; - mutable SpinLock<u8> m_lock; -}; - -} diff --git a/Kernel/Devices/BXVGADevice.h b/Kernel/Devices/BXVGADevice.h index de3f4bfd72..6b98db525b 100644 --- a/Kernel/Devices/BXVGADevice.h +++ b/Kernel/Devices/BXVGADevice.h @@ -48,9 +48,10 @@ private: virtual const char* class_name() const override { return "BXVGA"; } virtual bool can_read(const FileDescription&, size_t) const override { return true; } virtual bool can_write(const FileDescription&, size_t) const override { return true; } - virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); } virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; } virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; } + virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; } + virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; } void set_safe_resolution(); diff --git a/Kernel/Devices/BlockDevice.cpp b/Kernel/Devices/BlockDevice.cpp index 9e24f2dba8..d1f70a14dd 100644 --- a/Kernel/Devices/BlockDevice.cpp +++ b/Kernel/Devices/BlockDevice.cpp @@ -28,66 +28,18 @@ namespace Kernel { -AsyncBlockDeviceRequest::AsyncBlockDeviceRequest(Device& block_device, RequestType request_type, u32 block_index, u32 block_count, const UserOrKernelBuffer& buffer, size_t buffer_size) - : AsyncDeviceRequest(block_device) - , m_block_device(static_cast<BlockDevice&>(block_device)) - , m_request_type(request_type) - , m_block_index(block_index) - , m_block_count(block_count) - , m_buffer(buffer) - , m_buffer_size(buffer_size) -{ -} - -void 
AsyncBlockDeviceRequest::start() -{ - m_block_device.start_request(*this); -} - BlockDevice::~BlockDevice() { } -bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer) +bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer) const { - auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, 1, buffer, 512); - switch (read_request->wait().request_result()) { - case AsyncDeviceRequest::Success: - return true; - case AsyncDeviceRequest::Failure: - dbg() << "BlockDevice::read_block(" << index << ") IO error"; - break; - case AsyncDeviceRequest::MemoryFault: - dbg() << "BlockDevice::read_block(" << index << ") EFAULT"; - break; - case AsyncDeviceRequest::Cancelled: - dbg() << "BlockDevice::read_block(" << index << ") cancelled"; - break; - default: - ASSERT_NOT_REACHED(); - } - return false; + return const_cast<BlockDevice*>(this)->read_blocks(index, 1, buffer); } -bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& buffer) +bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& data) { - auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, 1, buffer, 512); - switch (write_request->wait().request_result()) { - case AsyncDeviceRequest::Success: - return true; - case AsyncDeviceRequest::Failure: - dbg() << "BlockDevice::write_block(" << index << ") IO error"; - break; - case AsyncDeviceRequest::MemoryFault: - dbg() << "BlockDevice::write_block(" << index << ") EFAULT"; - break; - case AsyncDeviceRequest::Cancelled: - dbg() << "BlockDevice::write_block(" << index << ") cancelled"; - break; - default: - ASSERT_NOT_REACHED(); - } - return false; + return write_blocks(index, 1, data); } } diff --git a/Kernel/Devices/BlockDevice.h b/Kernel/Devices/BlockDevice.h index 975f46b7a8..ee9a5bbead 100644 --- a/Kernel/Devices/BlockDevice.h +++ b/Kernel/Devices/BlockDevice.h @@ -30,46 +30,6 @@ namespace Kernel { -class BlockDevice; - -class AsyncBlockDeviceRequest : public AsyncDeviceRequest { -public: - enum RequestType { - Read, - Write - }; - AsyncBlockDeviceRequest(Device& block_device, RequestType request_type, - u32 block_index, u32 block_count, const UserOrKernelBuffer& buffer, size_t buffer_size); - - RequestType request_type() const { return m_request_type; } - u32 block_index() const { return m_block_index; } - u32 block_count() const { return m_block_count; } - UserOrKernelBuffer& buffer() { return m_buffer; } - const UserOrKernelBuffer& buffer() const { return m_buffer; } - size_t buffer_size() const { return m_buffer_size; } - - virtual void start() override; - virtual const char* name() const override - { - switch (m_request_type) { - case Read: - return "BlockDeviceRequest (read)"; - case Write: - return "BlockDeviceRequest (read)"; - default: - ASSERT_NOT_REACHED(); - } - } - -private: - BlockDevice& m_block_device; - const RequestType m_request_type; - const u32 m_block_index; - const u32 m_block_count; - UserOrKernelBuffer m_buffer; - const size_t m_buffer_size; -}; - class BlockDevice : public Device { public: virtual ~BlockDevice() override; @@ -77,10 +37,11 @@ public: size_t block_size() const { return m_block_size; } virtual bool is_seekable() const override { return true; } - bool read_block(unsigned index, UserOrKernelBuffer&); + bool read_block(unsigned index, UserOrKernelBuffer&) const; bool write_block(unsigned index, const UserOrKernelBuffer&); - virtual void start_request(AsyncBlockDeviceRequest&) = 0; + virtual bool 
read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) = 0; + virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) = 0; protected: BlockDevice(unsigned major, unsigned minor, size_t block_size = PAGE_SIZE) diff --git a/Kernel/Devices/Device.cpp b/Kernel/Devices/Device.cpp index eda56e1cfe..934c587ba6 100644 --- a/Kernel/Devices/Device.cpp +++ b/Kernel/Devices/Device.cpp @@ -80,21 +80,4 @@ String Device::absolute_path(const FileDescription&) const return absolute_path(); } -void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest& completed_request) -{ - AsyncDeviceRequest* next_request = nullptr; - - { - ScopedSpinLock lock(m_requests_lock); - ASSERT(!m_requests.is_empty()); - ASSERT(m_requests.first().ptr() == &completed_request); - m_requests.remove(m_requests.begin()); - if (!m_requests.is_empty()) - next_request = m_requests.first().ptr(); - } - - if (next_request) - next_request->start(); -} - } diff --git a/Kernel/Devices/Device.h b/Kernel/Devices/Device.h index 80a63e22a8..8fa97a02ab 100644 --- a/Kernel/Devices/Device.h +++ b/Kernel/Devices/Device.h @@ -34,12 +34,10 @@ // There are two main subclasses: // - BlockDevice (random access) // - CharacterDevice (sequential) -#include <AK/DoublyLinkedList.h> #include <AK/Function.h> #include <AK/HashMap.h> -#include <Kernel/Devices/AsyncDeviceRequest.h> +#include <Kernel/Arch/i386/CPU.h> #include <Kernel/FileSystem/File.h> -#include <Kernel/Lock.h> #include <Kernel/UnixTypes.h> namespace Kernel { @@ -63,23 +61,6 @@ public: static void for_each(Function<void(Device&)>); static Device* get_device(unsigned major, unsigned minor); - void process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest&); - - template<typename AsyncRequestType, typename... Args> - NonnullRefPtr<AsyncRequestType> make_request(Args&&... 
args) - { - auto request = adopt(*new AsyncRequestType(*this, forward<Args>(args)...)); - bool was_empty; - { - ScopedSpinLock lock(m_requests_lock); - was_empty = m_requests.is_empty(); - m_requests.append(request); - } - if (was_empty) - request->do_start({}); - return request; - } - protected: Device(unsigned major, unsigned minor); void set_uid(uid_t uid) { m_uid = uid; } @@ -92,9 +73,6 @@ private: unsigned m_minor { 0 }; uid_t m_uid { 0 }; gid_t m_gid { 0 }; - - SpinLock<u8> m_requests_lock; - DoublyLinkedList<RefPtr<AsyncDeviceRequest>> m_requests; }; } diff --git a/Kernel/Devices/DiskPartition.cpp b/Kernel/Devices/DiskPartition.cpp index d07961f74c..d1ccb90d1b 100644 --- a/Kernel/Devices/DiskPartition.cpp +++ b/Kernel/Devices/DiskPartition.cpp @@ -48,12 +48,6 @@ DiskPartition::~DiskPartition() { } -void DiskPartition::start_request(AsyncBlockDeviceRequest& request) -{ - request.add_sub_request(m_device->make_request<AsyncBlockDeviceRequest>(request.request_type(), - request.block_index() + m_block_offset, request.block_count(), request.buffer(), request.buffer_size())); -} - KResultOr<size_t> DiskPartition::read(FileDescription& fd, size_t offset, UserOrKernelBuffer& outbuf, size_t len) { unsigned adjust = m_block_offset * block_size(); @@ -98,6 +92,24 @@ bool DiskPartition::can_write(const FileDescription& fd, size_t offset) const return m_device->can_write(fd, offset + adjust); } +bool DiskPartition::read_blocks(unsigned index, u16 count, UserOrKernelBuffer& out) +{ +#ifdef OFFD_DEBUG + klog() << "DiskPartition::read_blocks " << index << " (really: " << (m_block_offset + index) << ") count=" << count; +#endif + + return m_device->read_blocks(m_block_offset + index, count, out); +} + +bool DiskPartition::write_blocks(unsigned index, u16 count, const UserOrKernelBuffer& data) +{ +#ifdef OFFD_DEBUG + klog() << "DiskPartition::write_blocks " << index << " (really: " << (m_block_offset + index) << ") count=" << count; +#endif + + return m_device->write_blocks(m_block_offset + index, count, data); +} + const char* DiskPartition::class_name() const { return "DiskPartition"; diff --git a/Kernel/Devices/DiskPartition.h b/Kernel/Devices/DiskPartition.h index 7d37354960..e9551b8097 100644 --- a/Kernel/Devices/DiskPartition.h +++ b/Kernel/Devices/DiskPartition.h @@ -36,7 +36,8 @@ public: static NonnullRefPtr<DiskPartition> create(BlockDevice&, unsigned block_offset, unsigned block_limit); virtual ~DiskPartition(); - virtual void start_request(AsyncBlockDeviceRequest&) override; + virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) override; + virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) override; // ^BlockDevice virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override; diff --git a/Kernel/Devices/EBRPartitionTable.cpp b/Kernel/Devices/EBRPartitionTable.cpp index 6cdd52fb8a..bfaf999162 100644 --- a/Kernel/Devices/EBRPartitionTable.cpp +++ b/Kernel/Devices/EBRPartitionTable.cpp @@ -63,9 +63,6 @@ int EBRPartitionTable::index_of_ebr_container() const bool EBRPartitionTable::initialize() { - auto mbr_header_request = m_device->make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, - 0, 1, UserOrKernelBuffer::for_kernel_buffer(m_cached_mbr_header), sizeof(m_cached_mbr_header)); - auto mbr_header_buffer = UserOrKernelBuffer::for_kernel_buffer(m_cached_mbr_header); if (!m_device->read_block(0, mbr_header_buffer)) { return false; diff --git a/Kernel/Devices/GPTPartitionTable.cpp 
b/Kernel/Devices/GPTPartitionTable.cpp index 019f8994e1..9031269031 100644 --- a/Kernel/Devices/GPTPartitionTable.cpp +++ b/Kernel/Devices/GPTPartitionTable.cpp @@ -84,7 +84,7 @@ RefPtr<DiskPartition> GPTPartitionTable::partition(unsigned index) GPTPartitionEntry entries[entries_per_sector]; auto entries_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&entries); - this->m_device->read_block(lba, entries_buffer); + this->m_device->read_blocks(lba, 1, entries_buffer); GPTPartitionEntry& entry = entries[((index - 1) % entries_per_sector)]; #ifdef GPT_DEBUG diff --git a/Kernel/Devices/MBVGADevice.h b/Kernel/Devices/MBVGADevice.h index 9ee9d762ae..09dda1f853 100644 --- a/Kernel/Devices/MBVGADevice.h +++ b/Kernel/Devices/MBVGADevice.h @@ -49,7 +49,8 @@ private: virtual bool can_write(const FileDescription&, size_t) const override { return true; } virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; } virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; } - virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); } + virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; } + virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; } size_t framebuffer_size_in_bytes() const { return m_framebuffer_pitch * m_framebuffer_height; } diff --git a/Kernel/Devices/PATAChannel.cpp b/Kernel/Devices/PATAChannel.cpp index d4f32096c1..cc47ff532c 100644 --- a/Kernel/Devices/PATAChannel.cpp +++ b/Kernel/Devices/PATAChannel.cpp @@ -108,6 +108,13 @@ namespace Kernel { #define PCI_Mass_Storage_Class 0x1 #define PCI_IDE_Controller_Subclass 0x1 +static AK::Singleton<Lock> s_pata_lock; + +static Lock& s_lock() +{ + return *s_pata_lock; +}; + OwnPtr<PATAChannel> PATAChannel::create(ChannelType type, bool force_pio) { PCI::Address pci_address; @@ -141,59 +148,10 @@ PATAChannel::~PATAChannel() { } -void PATAChannel::start_request(AsyncBlockDeviceRequest& request, bool use_dma, bool is_slave) +void PATAChannel::prepare_for_irq() { - m_current_request = &request; - m_current_request_block_index = 0; - m_current_request_uses_dma = use_dma; - m_current_request_flushing_cache = false; - - if (request.request_type() == AsyncBlockDeviceRequest::Read) { - if (use_dma) - ata_read_sectors_with_dma(is_slave); - else - ata_read_sectors(is_slave); - } else { - if (use_dma) - ata_write_sectors_with_dma(is_slave); - else - ata_write_sectors(is_slave); - } -} - -void PATAChannel::complete_current_request(AsyncDeviceRequest::RequestResult result) -{ - // NOTE: this may be called from the interrupt handler! - ASSERT(m_current_request); - - // Now schedule reading back the buffer as soon as we leave the irq handler. - // This is important so that we can safely write the buffer back, - // which could cause page faults. Note that this may be called immediately - // before Processor::deferred_call_queue returns! 
- Processor::deferred_call_queue([this, result]() { -#ifdef PATA_DEBUG - dbg() << "PATAChannel::complete_current_request result: " << result; -#endif - ASSERT(m_current_request); - auto& request = *m_current_request; - m_current_request = nullptr; - - if (m_current_request_uses_dma) { - if (result == AsyncDeviceRequest::Success) { - if (request.request_type() == AsyncBlockDeviceRequest::Read) { - if (!request.write_to_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) { - request.complete(AsyncDeviceRequest::MemoryFault); - return; - } - } - - // I read somewhere that this may trigger a cache flush so let's do it. - m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); - } - } - - request.complete(result); - }); + cli(); + enable_irq(); } void PATAChannel::initialize(bool force_pio) @@ -217,6 +175,12 @@ static void print_ide_status(u8 status) klog() << "PATAChannel: print_ide_status: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0) << " DSC=" << ((status & ATA_SR_DSC) != 0) << " DF=" << ((status & ATA_SR_DF) != 0) << " CORR=" << ((status & ATA_SR_CORR) != 0) << " IDX=" << ((status & ATA_SR_IDX) != 0) << " ERR=" << ((status & ATA_SR_ERR) != 0); } +void PATAChannel::wait_for_irq() +{ + Thread::current()->wait_on(m_irq_queue, "PATAChannel"); + disable_irq(); +} + void PATAChannel::handle_irq(const RegisterState&) { u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); @@ -232,63 +196,16 @@ void PATAChannel::handle_irq(const RegisterState&) return; } -#ifdef PATA_DEBUG - klog() << "PATAChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0); -#endif - - bool received_all_irqs = m_current_request_uses_dma || m_current_request_block_index + 1 >= m_current_request->block_count(); - - disable_irq(); - if (status & ATA_SR_ERR) { print_ide_status(status); m_device_error = m_io_base.offset(ATA_REG_ERROR).in<u8>(); klog() << "PATAChannel: Error " << String::format("%b", m_device_error) << "!"; - complete_current_request(AsyncDeviceRequest::Failure); - return; - } - - m_device_error = 0; - if (received_all_irqs) { - complete_current_request(AsyncDeviceRequest::Success); } else { - ASSERT(!m_current_request_uses_dma); - - // Now schedule reading/writing the buffer as soon as we leave the irq handler. 
- // This is important so that we can safely access the buffers, which could - // trigger page faults - Processor::deferred_call_queue([this]() { - if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) { - dbg() << "PATAChannel: Read block " << m_current_request_block_index << "/" << m_current_request->block_count(); - if (ata_do_read_sector()) { - if (++m_current_request_block_index >= m_current_request->block_count()) { - complete_current_request(AsyncDeviceRequest::Success); - return; - } - // Wait for the next block - enable_irq(); - } - } else { - if (!m_current_request_flushing_cache) { - dbg() << "PATAChannel: Wrote block " << m_current_request_block_index << "/" << m_current_request->block_count(); - if (++m_current_request_block_index >= m_current_request->block_count()) { - // We read the last block, flush cache - ASSERT(!m_current_request_flushing_cache); - m_current_request_flushing_cache = true; - enable_irq(); - m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH); - } else { - // Read next block - enable_irq(); - ata_do_write_sector(); - } - } else { - complete_current_request(AsyncDeviceRequest::Success); - } - } - }); + m_device_error = 0; } - +#ifdef PATA_DEBUG + klog() << "PATAChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0); +#endif m_irq_queue.wake_all(); } @@ -357,16 +274,15 @@ void PATAChannel::detect_disks() } } -void PATAChannel::ata_read_sectors_with_dma(bool slave_request) +bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer& outbuf, bool slave_request) { - auto& request = *m_current_request; - u32 lba = request.block_index(); + LOCKER(s_lock()); #ifdef PATA_DEBUG - dbg() << "PATAChannel::ata_read_sectors_with_dma (" << lba << " x" << request.block_count() << ")"; + dbg() << "PATAChannel::ata_read_sectors_with_dma (" << lba << " x" << count << ") -> " << outbuf.user_or_kernel_ptr(); #endif prdt().offset = m_dma_buffer_page->paddr(); - prdt().size = 512 * request.block_count(); + prdt().size = 512 * count; ASSERT(prdt().size <= PAGE_SIZE); @@ -396,7 +312,7 @@ void PATAChannel::ata_read_sectors_with_dma(bool slave_request) m_io_base.offset(ATA_REG_LBA1).out<u8>(0); m_io_base.offset(ATA_REG_LBA2).out<u8>(0); - m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count()); + m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count); m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0); m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8); m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16); @@ -410,60 +326,62 @@ void PATAChannel::ata_read_sectors_with_dma(bool slave_request) m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_DMA_EXT); io_delay(); - enable_irq(); + prepare_for_irq(); // Start bus master m_bus_master_base.out<u8>(0x9); -} -bool PATAChannel::ata_do_read_sector() -{ - auto& request = *m_current_request; - auto out_buffer = request.buffer().offset(m_current_request_block_index * 512); - ssize_t nwritten = request.write_to_buffer_buffered<512>(out_buffer, 512, [&](u8* buffer, size_t buffer_bytes) { - for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) - *(u16*)&buffer[i] = IO::in16(m_io_base.offset(ATA_REG_DATA).get()); - return (ssize_t)buffer_bytes; - }); - if (nwritten < 0) { - // TODO: Do we need to abort the PATA read if this wasn't the last block? 
- complete_current_request(AsyncDeviceRequest::MemoryFault); + wait_for_irq(); + + if (m_device_error) return false; - } + + if (!outbuf.write(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count)) + return false; // TODO: -EFAULT + + // I read somewhere that this may trigger a cache flush so let's do it. + m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); return true; } -void PATAChannel::ata_read_sectors(bool slave_request) +bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer& inbuf, bool slave_request) { - auto& request = *m_current_request; - ASSERT(request.block_count() <= 256); + LOCKER(s_lock()); #ifdef PATA_DEBUG - dbg() << "PATAChannel::ata_read_sectors"; + dbg() << "PATAChannel::ata_write_sectors_with_dma (" << lba << " x" << count << ") <- " << inbuf.user_or_kernel_ptr(); #endif - while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) - ; + prdt().offset = m_dma_buffer_page->paddr(); + prdt().size = 512 * count; - auto lba = request.block_index(); -#ifdef PATA_DEBUG - klog() << "PATAChannel: Reading " << request.block_count() << " sector(s) @ LBA " << lba; -#endif + if (!inbuf.read(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count)) + return false; // TODO: -EFAULT - u8 devsel = 0xe0; - if (slave_request) - devsel |= 0x10; + ASSERT(prdt().size <= PAGE_SIZE); + + // Stop bus master + m_bus_master_base.out<u8>(0); + + // Write the PRDT location + m_bus_master_base.offset(4).out<u32>(m_prdt_page->paddr().get()); + + // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware. + m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); + + while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) + ; m_control_base.offset(ATA_CTL_CONTROL).out<u8>(0); - m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(devsel | (static_cast<u8>(slave_request) << 4) | 0x40); + m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(0x40 | (static_cast<u8>(slave_request) << 4)); io_delay(); - m_io_base.offset(ATA_REG_FEATURES).out<u8>(0); + m_io_base.offset(ATA_REG_FEATURES).out<u16>(0); m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(0); m_io_base.offset(ATA_REG_LBA0).out<u8>(0); m_io_base.offset(ATA_REG_LBA1).out<u8>(0); m_io_base.offset(ATA_REG_LBA2).out<u8>(0); - m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count()); + m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count); m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0); m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8); m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16); @@ -474,52 +392,53 @@ void PATAChannel::ata_read_sectors(bool slave_request) break; } - enable_irq(); - m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_PIO); + m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_DMA_EXT); + io_delay(); + + prepare_for_irq(); + // Start bus master + m_bus_master_base.out<u8>(0x1); + wait_for_irq(); + + if (m_device_error) + return false; + + // I read somewhere that this may trigger a cache flush so let's do it. 
+ m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); + return true; } -void PATAChannel::ata_write_sectors_with_dma(bool slave_request) +bool PATAChannel::ata_read_sectors(u32 lba, u16 count, UserOrKernelBuffer& outbuf, bool slave_request) { - auto& request = *m_current_request; - u32 lba = request.block_index(); + ASSERT(count <= 256); + LOCKER(s_lock()); #ifdef PATA_DEBUG - dbg() << "PATAChannel::ata_write_sectors_with_dma (" << lba << " x" << request.block_count() << ")"; + dbg() << "PATAChannel::ata_read_sectors request (" << count << " sector(s) @ " << lba << " into " << outbuf.user_or_kernel_ptr() << ")"; #endif - prdt().offset = m_dma_buffer_page->paddr(); - prdt().size = 512 * request.block_count(); - - if (!request.read_from_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) { - complete_current_request(AsyncDeviceRequest::MemoryFault); - return; - } - - ASSERT(prdt().size <= PAGE_SIZE); - - // Stop bus master - m_bus_master_base.out<u8>(0); - - // Write the PRDT location - m_bus_master_base.offset(4).out<u32>(m_prdt_page->paddr().get()); - - // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware. - m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6); - while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) ; +#ifdef PATA_DEBUG + klog() << "PATAChannel: Reading " << count << " sector(s) @ LBA " << lba; +#endif + + u8 devsel = 0xe0; + if (slave_request) + devsel |= 0x10; + m_control_base.offset(ATA_CTL_CONTROL).out<u8>(0); - m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(0x40 | (static_cast<u8>(slave_request) << 4)); + m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(devsel | (static_cast<u8>(slave_request) << 4) | 0x40); io_delay(); - m_io_base.offset(ATA_REG_FEATURES).out<u16>(0); + m_io_base.offset(ATA_REG_FEATURES).out<u8>(0); m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(0); m_io_base.offset(ATA_REG_LBA0).out<u8>(0); m_io_base.offset(ATA_REG_LBA1).out<u8>(0); m_io_base.offset(ATA_REG_LBA2).out<u8>(0); - m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count()); + m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count); m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0); m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8); m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16); @@ -530,45 +449,46 @@ void PATAChannel::ata_write_sectors_with_dma(bool slave_request) break; } - m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_DMA_EXT); - io_delay(); - - enable_irq(); - // Start bus master - m_bus_master_base.out<u8>(0x1); -} - -void PATAChannel::ata_do_write_sector() -{ - auto& request = *m_current_request; + prepare_for_irq(); + m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_PIO); - io_delay(); - while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ)) - ; + for (int i = 0; i < count; i++) { + if (i > 0) + prepare_for_irq(); + wait_for_irq(); + if (m_device_error) + return false; - u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); - ASSERT(status & ATA_SR_DRQ); + u8 status = m_control_base.offset(ATA_CTL_ALTSTATUS).in<u8>(); + ASSERT(!(status & ATA_SR_BSY)); - auto in_buffer = request.buffer().offset(m_current_request_block_index * 512); + auto out = outbuf.offset(i * 512); #ifdef PATA_DEBUG - dbg() << "PATAChannel: Writing 512 bytes (part " << m_current_request_block_index << ") (status=" << String::format("%b", 
status) << ")..."; + dbg() << "PATAChannel: Retrieving 512 bytes (part " << i << ") (status=" << String::format("%b", status) << "), outbuf=(" << out.user_or_kernel_ptr() << ")..."; #endif - ssize_t nread = request.read_from_buffer_buffered<512>(in_buffer, 512, [&](const u8* buffer, size_t buffer_bytes) { - for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) - IO::out16(m_io_base.offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]); - return (ssize_t)buffer_bytes; - }); - if (nread < 0) - complete_current_request(AsyncDeviceRequest::MemoryFault); + prepare_for_irq(); + + ssize_t nwritten = out.write_buffered<512>(512, [&](u8* buffer, size_t buffer_bytes) { + for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) + *(u16*)&buffer[i] = IO::in16(m_io_base.offset(ATA_REG_DATA).get()); + return (ssize_t)buffer_bytes; + }); + if (nwritten < 0) { + sti(); + disable_irq(); + return false; // TODO: -EFAULT + } + } + + sti(); + disable_irq(); + return true; } -void PATAChannel::ata_write_sectors(bool slave_request) +bool PATAChannel::ata_write_sectors(u32 start_sector, u16 count, const UserOrKernelBuffer& inbuf, bool slave_request) { - auto& request = *m_current_request; - - ASSERT(request.block_count() <= 256); - u32 start_sector = request.block_index(); - u32 count = request.block_count(); + ASSERT(count <= 256); + LOCKER(s_lock()); #ifdef PATA_DEBUG klog() << "PATAChannel::ata_write_sectors request (" << count << " sector(s) @ " << start_sector << ")"; #endif @@ -596,12 +516,37 @@ void PATAChannel::ata_write_sectors(bool slave_request) m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_PIO); - io_delay(); - while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ)) - ; + for (int i = 0; i < count; i++) { + io_delay(); + while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ)) + ; - enable_irq(); - ata_do_write_sector(); + u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); + ASSERT(status & ATA_SR_DRQ); + + auto in = inbuf.offset(i * 512); +#ifdef PATA_DEBUG + dbg() << "PATAChannel: Writing 512 bytes (part " << i << ") (status=" << String::format("%b", status) << "), inbuf=(" << in.user_or_kernel_ptr() << ")..."; +#endif + prepare_for_irq(); + ssize_t nread = in.read_buffered<512>(512, [&](const u8* buffer, size_t buffer_bytes) { + for (size_t i = 0; i < buffer_bytes; i += sizeof(u16)) + IO::out16(m_io_base.offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]); + return (ssize_t)buffer_bytes; + }); + wait_for_irq(); + status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); + ASSERT(!(status & ATA_SR_BSY)); + if (nread < 0) + return false; // TODO: -EFAULT + } + prepare_for_irq(); + m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH); + wait_for_irq(); + u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>(); + ASSERT(!(status & ATA_SR_BSY)); + + return !m_device_error; } } diff --git a/Kernel/Devices/PATAChannel.h b/Kernel/Devices/PATAChannel.h index 033598ae5e..40e331a4e8 100644 --- a/Kernel/Devices/PATAChannel.h +++ b/Kernel/Devices/PATAChannel.h @@ -39,7 +39,6 @@ #include <AK/OwnPtr.h> #include <AK/RefPtr.h> -#include <Kernel/Devices/Device.h> #include <Kernel/IO.h> #include <Kernel/Lock.h> #include <Kernel/PCI/Access.h> @@ -51,8 +50,6 @@ namespace Kernel { -class AsyncBlockDeviceRequest; - struct PhysicalRegionDescriptor { PhysicalAddress offset; u16 size { 0 }; @@ -86,15 +83,13 @@ private: void initialize(bool force_pio); void detect_disks(); 
- void start_request(AsyncBlockDeviceRequest&, bool, bool); - void complete_current_request(AsyncDeviceRequest::RequestResult); + void wait_for_irq(); + bool ata_read_sectors_with_dma(u32, u16, UserOrKernelBuffer&, bool); + bool ata_write_sectors_with_dma(u32, u16, const UserOrKernelBuffer&, bool); + bool ata_read_sectors(u32, u16, UserOrKernelBuffer&, bool); + bool ata_write_sectors(u32, u16, const UserOrKernelBuffer&, bool); - void ata_read_sectors_with_dma(bool); - void ata_read_sectors(bool); - bool ata_do_read_sector(); - void ata_write_sectors_with_dma(bool); - void ata_write_sectors(bool); - void ata_do_write_sector(); + inline void prepare_for_irq(); // Data members u8 m_channel_number { 0 }; // Channel number. 0 = master, 1 = slave @@ -113,10 +108,5 @@ private: RefPtr<PATADiskDevice> m_master; RefPtr<PATADiskDevice> m_slave; - - AsyncBlockDeviceRequest* m_current_request { nullptr }; - u32 m_current_request_block_index { 0 }; - bool m_current_request_uses_dma { false }; - bool m_current_request_flushing_cache { false }; }; } diff --git a/Kernel/Devices/PATADiskDevice.cpp b/Kernel/Devices/PATADiskDevice.cpp index 8b85783a5b..a81f7bb557 100644 --- a/Kernel/Devices/PATADiskDevice.cpp +++ b/Kernel/Devices/PATADiskDevice.cpp @@ -55,10 +55,22 @@ const char* PATADiskDevice::class_name() const return "PATADiskDevice"; } -void PATADiskDevice::start_request(AsyncBlockDeviceRequest& request) +bool PATADiskDevice::read_blocks(unsigned index, u16 count, UserOrKernelBuffer& out) { - bool use_dma = !m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource(); - m_channel.start_request(request, use_dma, is_slave()); + if (!m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource()) + return read_sectors_with_dma(index, count, out); + return read_sectors(index, count, out); +} + +bool PATADiskDevice::write_blocks(unsigned index, u16 count, const UserOrKernelBuffer& data) +{ + if (!m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource()) + return write_sectors_with_dma(index, count, data); + for (unsigned i = 0; i < count; ++i) { + if (!write_sectors(index + i, 1, data.offset(i * 512))) + return false; + } + return true; } void PATADiskDevice::set_drive_geometry(u16 cyls, u16 heads, u16 spt) @@ -88,19 +100,8 @@ KResultOr<size_t> PATADiskDevice::read(FileDescription&, size_t offset, UserOrKe #endif if (whole_blocks > 0) { - auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, whole_blocks, outbuf, whole_blocks * block_size()); - auto result = read_request->wait(); - if (result.wait_result().was_interrupted()) - return KResult(-EINTR); - switch (result.request_result()) { - case AsyncDeviceRequest::Failure: - case AsyncDeviceRequest::Cancelled: - return KResult(-EIO); - case AsyncDeviceRequest::MemoryFault: - return KResult(-EFAULT); - default: - break; - } + if (!read_blocks(index, whole_blocks, outbuf)) + return -1; } off_t pos = whole_blocks * block_size(); @@ -108,21 +109,8 @@ KResultOr<size_t> PATADiskDevice::read(FileDescription&, size_t offset, UserOrKe if (remaining > 0) { auto data = ByteBuffer::create_uninitialized(block_size()); auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data()); - auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size()); - auto result = read_request->wait(); - if (result.wait_result().was_interrupted()) - return KResult(-EINTR); - switch (result.request_result()) { - case 
AsyncDeviceRequest::Failure: + if (!read_blocks(index + whole_blocks, 1, data_buffer)) return pos; - case AsyncDeviceRequest::Cancelled: - return KResult(-EIO); - case AsyncDeviceRequest::MemoryFault: - // This should never happen, we're writing to a kernel buffer! - ASSERT_NOT_REACHED(); - default: - break; - } if (!outbuf.write(data.data(), pos, remaining)) return KResult(-EFAULT); } @@ -155,19 +143,8 @@ KResultOr<size_t> PATADiskDevice::write(FileDescription&, size_t offset, const U #endif if (whole_blocks > 0) { - auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, whole_blocks, inbuf, whole_blocks * block_size()); - auto result = write_request->wait(); - if (result.wait_result().was_interrupted()) - return KResult(-EINTR); - switch (result.request_result()) { - case AsyncDeviceRequest::Failure: - case AsyncDeviceRequest::Cancelled: - return KResult(-EIO); - case AsyncDeviceRequest::MemoryFault: - return KResult(-EFAULT); - default: - break; - } + if (!write_blocks(index, whole_blocks, inbuf)) + return -1; } off_t pos = whole_blocks * block_size(); @@ -178,45 +155,12 @@ KResultOr<size_t> PATADiskDevice::write(FileDescription&, size_t offset, const U if (remaining > 0) { auto data = ByteBuffer::create_zeroed(block_size()); auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data()); - - { - auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size()); - auto result = read_request->wait(); - if (result.wait_result().was_interrupted()) - return KResult(-EINTR); - switch (result.request_result()) { - case AsyncDeviceRequest::Failure: - return pos; - case AsyncDeviceRequest::Cancelled: - return KResult(-EIO); - case AsyncDeviceRequest::MemoryFault: - // This should never happen, we're writing to a kernel buffer! - ASSERT_NOT_REACHED(); - default: - break; - } - } - + if (!read_blocks(index + whole_blocks, 1, data_buffer)) + return pos; if (!inbuf.read(data.data(), pos, remaining)) return KResult(-EFAULT); - - { - auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index + whole_blocks, 1, data_buffer, block_size()); - auto result = write_request->wait(); - if (result.wait_result().was_interrupted()) - return KResult(-EINTR); - switch (result.request_result()) { - case AsyncDeviceRequest::Failure: - return pos; - case AsyncDeviceRequest::Cancelled: - return KResult(-EIO); - case AsyncDeviceRequest::MemoryFault: - // This should never happen, we're writing to a kernel buffer! 
- ASSERT_NOT_REACHED(); - default: - break; - } - } + if (!write_blocks(index + whole_blocks, 1, data_buffer)) + return pos; } return pos + remaining; @@ -227,6 +171,26 @@ bool PATADiskDevice::can_write(const FileDescription&, size_t offset) const return offset < (m_cylinders * m_heads * m_sectors_per_track * block_size()); } +bool PATADiskDevice::read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer& outbuf) +{ + return m_channel.ata_read_sectors_with_dma(lba, count, outbuf, is_slave()); +} + +bool PATADiskDevice::read_sectors(u32 start_sector, u16 count, UserOrKernelBuffer& outbuf) +{ + return m_channel.ata_read_sectors(start_sector, count, outbuf, is_slave()); +} + +bool PATADiskDevice::write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer& inbuf) +{ + return m_channel.ata_write_sectors_with_dma(lba, count, inbuf, is_slave()); +} + +bool PATADiskDevice::write_sectors(u32 start_sector, u16 count, const UserOrKernelBuffer& inbuf) +{ + return m_channel.ata_write_sectors(start_sector, count, inbuf, is_slave()); +} + bool PATADiskDevice::is_slave() const { return m_drive_type == DriveType::Slave; diff --git a/Kernel/Devices/PATADiskDevice.h b/Kernel/Devices/PATADiskDevice.h index 1036b06fd5..eb9377e1bf 100644 --- a/Kernel/Devices/PATADiskDevice.h +++ b/Kernel/Devices/PATADiskDevice.h @@ -54,10 +54,13 @@ public: static NonnullRefPtr<PATADiskDevice> create(PATAChannel&, DriveType, int major, int minor); virtual ~PATADiskDevice() override; + // ^DiskDevice + virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) override; + virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) override; + void set_drive_geometry(u16, u16, u16); // ^BlockDevice - virtual void start_request(AsyncBlockDeviceRequest&) override; virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override; virtual bool can_read(const FileDescription&, size_t) const override; virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override; @@ -70,6 +73,11 @@ private: // ^DiskDevice virtual const char* class_name() const override; + bool wait_for_irq(); + bool read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer&); + bool write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer&); + bool read_sectors(u32 lba, u16 count, UserOrKernelBuffer& buffer); + bool write_sectors(u32 lba, u16 count, const UserOrKernelBuffer& data); bool is_slave() const; Lock m_lock { "IDEDiskDevice" }; |
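One detail worth noting in the restored `PATADiskDevice::write()` above is how a write that does not end on a block boundary is handled: full blocks are written directly, then the trailing, partially covered block is read, patched in memory, and written back. The stand-alone sketch below illustrates that read-modify-write step; the free-standing `read_block`/`write_block` callables and the `write_bytes()` helper are assumptions used only to keep it self-contained, while the 512-byte block size matches the driver.

```cpp
// Sketch of the read-modify-write done for a trailing partial block, as in
// PATADiskDevice::write(): whole blocks go straight to the device, then the
// last, partially covered block is read, patched, and written back.
#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

constexpr size_t block_size = 512;

bool write_bytes(unsigned first_block, const uint8_t* data, size_t len,
                 const std::function<bool(unsigned, uint8_t*)>& read_block,
                 const std::function<bool(unsigned, const uint8_t*)>& write_block)
{
    size_t whole_blocks = len / block_size;
    size_t remaining = len % block_size;

    // Full blocks can be written directly from the caller's buffer.
    for (size_t i = 0; i < whole_blocks; ++i) {
        if (!write_block(first_block + i, data + i * block_size))
            return false;
    }

    if (remaining == 0)
        return true;

    // The tail only covers part of a block: read it, overwrite the first
    // `remaining` bytes, and write the whole block back.
    std::vector<uint8_t> scratch(block_size);
    if (!read_block(first_block + whole_blocks, scratch.data()))
        return false;
    std::memcpy(scratch.data(), data + whole_blocks * block_size, remaining);
    return write_block(first_block + whole_blocks, scratch.data());
}
```

The same pattern appears on the read side of the restored code: whole blocks are read straight into the caller's buffer, and a trailing partial block goes through a kernel scratch buffer first.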