author     Tom <tomut@yahoo.com>  2020-08-05 19:13:28 -0600
committer  Andreas Kling <kling@serenityos.org>  2020-08-06 10:02:55 +0200
commit     41d2a0e9f7b523947d80567a11a4bf9f79f64617
tree       50abb66ef1db1d82230d74084c8f9266068370fc
parent     9495eeb0759d2966fc1c085336f920950ee7fde4
Kernel: Dequeue dying threads from WaitQueue
If a thread is waiting but gets killed, we need to dequeue it from the WaitQueue so that it cannot be woken up again before it is finalized.
 Kernel/Thread.cpp    | 31 ++++++++++++++++++++++++-------
 Kernel/Thread.h      |  1 +
 Kernel/WaitQueue.cpp | 10 ++++++++++
 Kernel/WaitQueue.h   |  2 ++
 4 files changed, 37 insertions(+), 7 deletions(-)
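
The race this commit closes is easier to see outside the kernel. Below is a minimal user-space C++ analogue (all names here are hypothetical: the kernel's SpinLock, scheduler lock, and Thread are reduced to a std::mutex and a plain Waiter struct). The point it demonstrates: a waiter that is being torn down must remove itself under the same lock the wakers take, so a later wake can never hand out a pointer to a dead waiter.

// Minimal user-space analogue of the pattern; a sketch, not kernel code.
#include <algorithm>
#include <cassert>
#include <iostream>
#include <list>
#include <mutex>

struct Waiter {
    int id { 0 };
};

class WaitQueue {
public:
    void enqueue(Waiter& waiter)
    {
        std::lock_guard<std::mutex> lock(m_lock);
        m_waiters.push_back(&waiter);
    }

    // Returns true if the waiter was still queued, i.e. nobody woke it.
    bool dequeue(Waiter& waiter)
    {
        std::lock_guard<std::mutex> lock(m_lock);
        auto it = std::find(m_waiters.begin(), m_waiters.end(), &waiter);
        if (it == m_waiters.end())
            return false;
        m_waiters.erase(it);
        return true;
    }

    // Pops and returns the oldest waiter, or nullptr if none is queued.
    Waiter* wake_one()
    {
        std::lock_guard<std::mutex> lock(m_lock);
        if (m_waiters.empty())
            return nullptr;
        Waiter* waiter = m_waiters.front();
        m_waiters.pop_front();
        return waiter;
    }

private:
    std::mutex m_lock;
    std::list<Waiter*> m_waiters;
};

int main()
{
    WaitQueue queue;
    Waiter waiter { 1 };
    queue.enqueue(waiter);

    // The waiter is killed while queued: pull it off the queue first...
    assert(queue.dequeue(waiter));

    // ...so a later wake can no longer observe the dead waiter.
    assert(queue.wake_one() == nullptr);
    std::cout << "no stale wake-up\n";
    return 0;
}

Without the dequeue() call, wake_one() would return the dead waiter and the wake-up could race with finalization, which is exactly the scenario the commit message describes.
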
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 989ce711f4..f5ebfac327 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -714,10 +714,20 @@ void Thread::set_state(State new_state)
ASSERT(g_scheduler_data->has_thread(*this));
}
-    if (m_state == Dying && this != Thread::current() && is_finalizable()) {
-        // Some other thread set this thread to Dying, notify the
-        // finalizer right away as it can be cleaned up now
-        Scheduler::notify_finalizer();
+    if (m_state == Dying) {
+        if (previous_state == Queued) {
+            // Holding the scheduler lock, we need to dequeue this thread
+            ASSERT(m_wait_queue != nullptr);
+            m_wait_queue->dequeue(*this);
+            m_wait_queue = nullptr;
+        }
+
+
+        if (this != Thread::current() && is_finalizable()) {
+            // Some other thread set this thread to Dying, notify the
+            // finalizer right away as it can be cleaned up now
+            Scheduler::notify_finalizer();
+        }
     }
}
@@ -854,6 +864,7 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary)
{
+ auto* current_thread = Thread::current();
TimerId timer_id {};
bool did_unlock;
@@ -865,7 +876,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// we need to wait until the scheduler lock is released again
{
ScopedSpinLock sched_lock(g_scheduler_lock);
- if (!queue.enqueue(*Thread::current())) {
+ if (!queue.enqueue(*current_thread)) {
// The WaitQueue was already requested to wake someone when
// nobody was waiting. So return right away as we shouldn't
// be waiting
@@ -876,6 +887,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
return BlockResult::NotBlocked;
}
+ current_thread->m_wait_queue = &queue;
did_unlock = unlock_process_if_locked();
if (lock)
@@ -885,7 +897,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
     if (timeout) {
         timer_id = TimerQueue::the().add_timer(*timeout, [&]() {
-            ScopedSpinLock sched_lock(g_scheduler_lock);
             wake_from_queue();
         });
     }
@@ -921,7 +932,12 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// scheduler lock, which is held when we insert into the queue
ScopedSpinLock sched_lock(g_scheduler_lock);
-    if (m_wait_queue_node.is_in_list())
+    // m_wait_queue should have been cleared either by the timeout
+    // or by being woken
+    ASSERT(m_wait_queue == nullptr);
+
+    // If our thread was still in the queue, we timed out
+    if (queue.dequeue(*current_thread))
         result = BlockResult::InterruptedByTimeout;
// Make sure we cancel the timer if woke normally.
@@ -940,6 +956,7 @@ void Thread::wake_from_queue()
     ScopedSpinLock lock(g_scheduler_lock);
     ASSERT(state() == State::Queued);
     m_wait_reason = nullptr;
+    m_wait_queue = nullptr;
     if (this != Thread::current())
         set_state(State::Runnable);
     else
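
Note how the rewritten tail of wait_on() above turns dequeue()'s boolean result into the timeout test: if the thread is still in the queue once it resumes, no waker ever removed it. A sketch of that idiom, reusing the hypothetical WaitQueue/Waiter analogue from above (finish_wait is not a kernel function):

enum class BlockResult {
    Woken,
    InterruptedByTimeout,
};

// Decide how the wait ended: a waker dequeues the waiter when waking it,
// so finding ourselves still queued means the wake never happened.
BlockResult finish_wait(WaitQueue& queue, Waiter& self)
{
    if (queue.dequeue(self))
        return BlockResult::InterruptedByTimeout;
    return BlockResult::Woken;
}

This replaces the old m_wait_queue_node.is_in_list() check with an operation that tests and removes under the queue's own lock, so the check and the removal can no longer be separated by a concurrent wake.
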
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 9ecc94e88a..d06ed100fd 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -521,6 +521,7 @@ public:
private:
IntrusiveListNode m_runnable_list_node;
IntrusiveListNode m_wait_queue_node;
+ WaitQueue* m_wait_queue { nullptr };
private:
friend class SchedulerData;
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index d89ca3df74..f2e95573c6 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -56,6 +56,16 @@ bool WaitQueue::enqueue(Thread& thread)
return true;
}
+bool WaitQueue::dequeue(Thread& thread)
+{
+    ScopedSpinLock queue_lock(m_lock);
+    if (m_threads.contains(thread)) {
+        m_threads.remove(thread);
+        return true;
+    }
+    return false;
+}
+
void WaitQueue::wake_one(Atomic<bool>* lock)
{
ScopedSpinLock queue_lock(m_lock);
diff --git a/Kernel/WaitQueue.h b/Kernel/WaitQueue.h
index b6fa0e3691..08330e1745 100644
--- a/Kernel/WaitQueue.h
+++ b/Kernel/WaitQueue.h
@@ -38,7 +38,9 @@ public:
WaitQueue();
~WaitQueue();
+ SpinLock<u32>& get_lock() { return m_lock; }
bool enqueue(Thread&);
+ bool dequeue(Thread&);
void wake_one(Atomic<bool>* lock = nullptr);
void wake_n(u32 wake_count);
void wake_all();
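
Finally, the Thread::m_wait_queue back-pointer added in Thread.h is what lets set_state() find the queue a dying thread is sitting on. A last sketch of that teardown path, again with the hypothetical analogue types from the first example (set_dying stands in for the Dying branch of Thread::set_state):

enum class State {
    Runnable,
    Queued,
    Dying,
};

struct Thread {
    State state { State::Runnable };
    WaitQueue* wait_queue { nullptr }; // analogue of Thread::m_wait_queue
    Waiter waiter;
};

// Analogue of the new Dying branch above: a thread killed while Queued
// leaves its queue immediately, so the finalizer can never race with a
// pending wake.
void set_dying(Thread& thread)
{
    if (thread.state == State::Queued) {
        // The kernel does this under the scheduler lock; here the queue's
        // internal lock, taken inside dequeue(), is enough for the sketch.
        assert(thread.wait_queue != nullptr);
        thread.wait_queue->dequeue(thread.waiter);
        thread.wait_queue = nullptr;
    }
    thread.state = State::Dying;
}
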