path: root/Kernel/Devices/AsyncDeviceRequest.cpp
author    Brian Gianforcaro <b.gianfo@gmail.com>  2021-04-19 23:55:44 -0700
committer Linus Groh <mail@linusgroh.de>          2021-04-21 19:31:49 +0200
commit    033b2876351f77d7f15c23e9dd4e232c2d30b25f
tree      454d7a4ee5796626bea5b72acde4b712273d2cf1
parent    f1d832e5eaeede6314cfe589a79592fa92a89a02
Kernel: Make AsyncDeviceRequest sub-req management alloc free
The previous implementation could allocate on insertion into the completed/pending sub-request vectors. There's no reason these can't be intrusive lists instead. This is a very minor step towards improving the ability to handle OOM, as tracked by #6369. It might also help improve performance on the IO path in certain situations; I'll benchmark that later.
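To illustrate why this helps, here is a minimal sketch with hypothetical names (not AK's actual IntrusiveList API, which differs in its templates and RefPtr ownership): an intrusive list embeds its link pointers in the element itself, so inserting or removing an element is pure pointer rewiring, never allocates, and therefore has no OOM failure path.

    // Hypothetical sketch of an intrusive list; AK::IntrusiveList differs
    // in detail, but the allocation story is the same.
    struct RequestList;

    struct Request {
        Request* prev { nullptr };
        Request* next { nullptr };
        RequestList* owner { nullptr }; // Which list currently holds us, if any.
    };

    struct RequestList {
        Request* head { nullptr };
        Request* tail { nullptr };

        bool is_empty() const { return head == nullptr; }

        // O(1) unlink; touches only pointers that already exist.
        void remove(Request& r)
        {
            if (r.prev)
                r.prev->next = r.next;
            else
                head = r.next;
            if (r.next)
                r.next->prev = r.prev;
            else
                tail = r.prev;
            r.prev = r.next = nullptr;
            r.owner = nullptr;
        }

        // O(1) append; unlinks from any previous list first, mirroring the
        // note in the second hunk below. No malloc() on this path.
        void append(Request& r)
        {
            if (r.owner)
                r.owner->remove(r);
            r.prev = tail;
            r.next = nullptr;
            if (tail)
                tail->next = &r;
            else
                head = &r;
            tail = &r;
            r.owner = this;
        }
    };

A Vector, by contrast, may have to grow and reallocate its backing store on append; with the intrusive approach, the storage for the links comes along with the request's own allocation.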
Diffstat (limited to 'Kernel/Devices/AsyncDeviceRequest.cpp')
-rw-r--r--  Kernel/Devices/AsyncDeviceRequest.cpp  30
1 file changed, 14 insertions, 16 deletions
diff --git a/Kernel/Devices/AsyncDeviceRequest.cpp b/Kernel/Devices/AsyncDeviceRequest.cpp
index 25977d1ee5..ad421389eb 100644
--- a/Kernel/Devices/AsyncDeviceRequest.cpp
+++ b/Kernel/Devices/AsyncDeviceRequest.cpp
@@ -49,10 +49,13 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
// sub-requests should be completed (either succeeded, failed, or cancelled).
// Which means there should be no more pending sub-requests and the
// entire AsyncDeviceRequest hierarchy should be immutable.
- for (auto& sub_request : m_sub_requests_complete) {
- VERIFY(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
- VERIFY(sub_request.m_parent_request == this);
- sub_request.m_parent_request = nullptr;
+ while (!m_sub_requests_complete.is_empty()) {
+ // Note: sub_request is ref-counted, and we use this specific pattern
+ // to make sure the refcount is dropped properly.
+ auto sub_request = m_sub_requests_complete.take_first();
+ VERIFY(is_completed_result(sub_request->m_result)); // Shouldn't need any locking anymore
+ VERIFY(sub_request->m_parent_request == this);
+ sub_request->m_parent_request = nullptr;
}
}
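The destructor loop above is written this way deliberately: take_first() hands the list's reference to a local, so each sub-request's refcount actually drops at the end of the iteration rather than lingering until the whole container is torn down. A rough stand-in for the pattern using std::shared_ptr instead of AK's RefPtr (hypothetical types, for illustration only):

    #include <cassert>
    #include <list>
    #include <memory>

    struct SubRequest {
        bool is_completed { true };
    };

    void drain(std::list<std::shared_ptr<SubRequest>>& complete)
    {
        while (!complete.empty()) {
            // Move the list's reference into a local handle ("take_first").
            std::shared_ptr<SubRequest> sub_request = std::move(complete.front());
            complete.pop_front();
            assert(sub_request->is_completed);
            // sub_request goes out of scope here; if it held the last
            // reference, the SubRequest is destroyed now, not later when
            // the container itself is destroyed.
        }
    }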
@@ -104,24 +107,19 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
{
ScopedSpinLock lock(m_lock);
VERIFY(m_result == Started);
- size_t index;
- for (index = 0; index < m_sub_requests_pending.size(); index++) {
- if (&m_sub_requests_pending[index] == &sub_request) {
- NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]);
- m_sub_requests_pending.remove(index);
- m_sub_requests_complete.append(move(request));
- break;
- }
+
+ if (m_sub_requests_pending.contains(sub_request)) {
+ // Note: append handles removing from any previous intrusive list internally.
+ m_sub_requests_complete.append(sub_request);
}
- VERIFY(index < m_sub_requests_pending.size());
+
all_completed = m_sub_requests_pending.is_empty();
if (all_completed) {
// Aggregate any errors
bool any_failures = false;
bool any_memory_faults = false;
- for (index = 0; index < m_sub_requests_complete.size(); index++) {
- auto& sub_request = m_sub_requests_complete[index];
- auto sub_result = sub_request.get_request_result();
+ for (auto& com_sub_request : m_sub_requests_complete) {
+ auto sub_result = com_sub_request.get_request_result();
VERIFY(is_completed_result(sub_result));
switch (sub_result) {
case Failure:
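Given the sketch near the top of this page, the move performed in sub_request_finished() reduces to a single call; because the hypothetical append() unlinks from the previous list internally, no separate remove step and no allocation are needed:

    RequestList pending_list;
    RequestList complete_list;

    // Hypothetical equivalent of the migration in sub_request_finished():
    // append() unlinks the request from pending_list before splicing it
    // onto complete_list, so the whole move is a handful of pointer writes.
    void move_to_complete(Request& request)
    {
        complete_list.append(request);
    }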