| author | Tom <tomut@yahoo.com> | 2020-07-05 15:46:51 -0600 |
| --- | --- | --- |
| committer | Andreas Kling <kling@serenityos.org> | 2020-07-06 10:00:24 +0200 |
| commit | 9725bda63ee7f4a3f2586ce41b174d6c51a23b58 (patch) | |
| tree | 1a334aee9554e8f85c1cb2fabf47a3d768f01c69 /Kernel | |
| parent | 2a82a25fecd5342710410c8a89a9b949e6068ce8 (diff) | |
| download | serenity-9725bda63ee7f4a3f2586ce41b174d6c51a23b58.zip | |
Kernel: Enhance WaitQueue to remember pending wakes
If WaitQueue::wake_all, WaitQueue::wake_one, or WaitQueue::wake_n
is called but nobody is currently waiting, we should remember that
fact and let the next attempt to wait return immediately instead of
blocking. This solves a race condition where the Finalizer thread is
notified to finalize a thread but is not (yet) waiting on this queue.
Fixes #2693
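The fix is the classic "don't lose a wake that arrives before the waiter" pattern: the queue latches a wake-requested flag, and the next thread that tries to wait consumes the flag and returns without blocking. Below is a minimal stand-alone sketch of that idea in portable C++, independent of the kernel tree; PendingWakeQueue and its members are hypothetical illustration names, not the kernel's WaitQueue/SpinLock API.

```cpp
// Minimal sketch (not SerenityOS code): a queue that remembers a pending wake
// so that a wake_one() issued before anyone waits is not lost.
#include <condition_variable>
#include <mutex>

class PendingWakeQueue {
public:
    // Returns false if a wake was already requested while nobody was waiting;
    // the caller should skip blocking (mirrors WaitQueue::enqueue() returning false).
    bool wait()
    {
        std::unique_lock lock(m_mutex);
        if (m_wake_requested) {
            m_wake_requested = false; // consume the remembered wake
            return false;
        }
        ++m_waiters;
        m_cv.wait(lock, [this] { return m_wakes > 0; });
        --m_wakes;
        --m_waiters;
        return true;
    }

    void wake_one()
    {
        std::lock_guard lock(m_mutex);
        if (m_waiters == 0) {
            // Nobody is waiting yet: remember the wake instead of dropping it.
            m_wake_requested = true;
            return;
        }
        ++m_wakes;
        m_wake_requested = false;
        m_cv.notify_one();
    }

private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    int m_waiters { 0 };
    int m_wakes { 0 };
    bool m_wake_requested { false };
};
```

In the actual patch the consume-the-flag step lives in WaitQueue::enqueue(), which now returns false; Thread::wait_on() translates that into the new BlockResult::NotBlocked, and callers such as Lock::lock() loop and re-check their condition instead of sleeping.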
Diffstat (limited to 'Kernel')
| -rw-r--r-- | Kernel/Lock.cpp | 28 |
| -rw-r--r-- | Kernel/Thread.cpp | 8 |
| -rw-r--r-- | Kernel/Thread.h | 1 |
| -rw-r--r-- | Kernel/WaitQueue.cpp | 78 |
| -rw-r--r-- | Kernel/WaitQueue.h | 5 |
5 files changed, 93 insertions, 27 deletions
diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp
index 74f6ae4074..f7fa7512fb 100644
--- a/Kernel/Lock.cpp
+++ b/Kernel/Lock.cpp
@@ -54,20 +54,20 @@ void Lock::lock(Mode mode)
     for (;;) {
         bool expected = false;
         if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
-            // FIXME: Do not add new readers if writers are queued.
-            bool modes_dont_conflict = !modes_conflict(m_mode, mode);
-            bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
-            if (modes_dont_conflict || already_hold_exclusive_lock) {
-                // We got the lock!
-                if (!already_hold_exclusive_lock)
-                    m_mode = mode;
-                m_holder = current_thread;
-                m_times_locked++;
-                m_lock.store(false, AK::memory_order_release);
-                return;
-            }
-            timeval* timeout = nullptr;
-            current_thread->wait_on(m_queue, m_name, timeout, &m_lock, m_holder);
+            do {
+                // FIXME: Do not add new readers if writers are queued.
+                bool modes_dont_conflict = !modes_conflict(m_mode, mode);
+                bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
+                if (modes_dont_conflict || already_hold_exclusive_lock) {
+                    // We got the lock!
+                    if (!already_hold_exclusive_lock)
+                        m_mode = mode;
+                    m_holder = current_thread;
+                    m_times_locked++;
+                    m_lock.store(false, AK::memory_order_release);
+                    return;
+                }
+            } while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
         } else if (Processor::current().in_critical()) {
             // If we're in a critical section and trying to lock, no context
             // switch will happen, so yield.
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index bf0dcaac4e..8f21bef644 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -872,12 +872,18 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
     // we need to wait until the scheduler lock is released again
     {
         ScopedSpinLock sched_lock(g_scheduler_lock);
+        if (!queue.enqueue(*Thread::current())) {
+            // The WaitQueue was already requested to wake someone when
+            // nobody was waiting. So return right away as we shouldn't
+            // be waiting
+            return BlockResult::NotBlocked;
+        }
+
         did_unlock = unlock_process_if_locked();
         if (lock)
             *lock = false;
         set_state(State::Queued);
         m_wait_reason = reason;
-        queue.enqueue(*Thread::current());
 
         if (timeout) {
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 1e0553a0ca..16a2a5cc9f 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -301,6 +301,7 @@ public:
     enum class BlockResult {
         WokeNormally,
+        NotBlocked,
         InterruptedBySignal,
         InterruptedByDeath,
         InterruptedByTimeout,
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index 77ec3caec2..d89ca3df74 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -27,8 +27,11 @@
 #include <Kernel/Thread.h>
 #include <Kernel/WaitQueue.h>
 
+//#define WAITQUEUE_DEBUG
+
 namespace Kernel {
+
 
 WaitQueue::WaitQueue()
 {
 }
@@ -37,10 +40,20 @@ WaitQueue::~WaitQueue()
 {
 }
 
-void WaitQueue::enqueue(Thread& thread)
+bool WaitQueue::enqueue(Thread& thread)
 {
     ScopedSpinLock queue_lock(m_lock);
+    if (m_wake_requested) {
+        // wake_* was called when no threads were in the queue
+        // we shouldn't wait at all
+        m_wake_requested = false;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": enqueue: wake_all pending";
+#endif
+        return false;
+    }
     m_threads.append(thread);
+    return true;
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
@@ -48,42 +61,87 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
     ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: nobody to wake, mark as pending";
+#endif
         return;
-    if (auto* thread = m_threads.take_first())
-        thread->wake_from_queue();
+    }
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one:";
+#endif
+    auto* thread = m_threads.take_first();
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: wake thread " << *thread;
+#endif
+    thread->wake_from_queue();
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
-void WaitQueue::wake_n(i32 wake_count)
+void WaitQueue::wake_n(u32 wake_count)
 {
     ScopedSpinLock queue_lock(m_lock);
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: nobody to wake, mark as pending";
+#endif
         return;
+    }
 
-    for (i32 i = 0; i < wake_count; ++i) {
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: " << wake_count;
+#endif
+    for (u32 i = 0; i < wake_count; ++i) {
         Thread* thread = m_threads.take_first();
         if (!thread)
             break;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: wake thread " << *thread;
+#endif
         thread->wake_from_queue();
     }
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
 void WaitQueue::wake_all()
 {
     ScopedSpinLock queue_lock(m_lock);
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: nobody to wake, mark as pending";
+#endif
         return;
-    while (!m_threads.is_empty())
-        m_threads.take_first()->wake_from_queue();
+    }
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: ";
+#endif
+    while (!m_threads.is_empty()) {
+        Thread* thread = m_threads.take_first();
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: wake thread " << *thread;
+#endif
+        thread->wake_from_queue();
+    }
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
 void WaitQueue::clear()
 {
     ScopedSpinLock queue_lock(m_lock);
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": clear";
+#endif
     m_threads.clear();
+    m_wake_requested = false;
 }
 
 }
diff --git a/Kernel/WaitQueue.h b/Kernel/WaitQueue.h
index c7705880bc..b6fa0e3691 100644
--- a/Kernel/WaitQueue.h
+++ b/Kernel/WaitQueue.h
@@ -38,9 +38,9 @@ public:
     WaitQueue();
     ~WaitQueue();
 
-    void enqueue(Thread&);
+    bool enqueue(Thread&);
     void wake_one(Atomic<bool>* lock = nullptr);
-    void wake_n(i32 wake_count);
+    void wake_n(u32 wake_count);
     void wake_all();
     void clear();
 
@@ -48,6 +48,7 @@ private:
     typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
     ThreadList m_threads;
     SpinLock<u32> m_lock;
+    bool m_wake_requested { false };
 };
 
 }