author    Tom <tomut@yahoo.com>                  2020-09-26 10:55:48 -0600
committer Andreas Kling <kling@serenityos.org>   2020-09-26 20:03:16 +0200
commit    69a9c7878332c848c678e016d951f218688a58e5
tree      2133fd6cf96baf5e3830ad7a587de276a580eead
parent    8b293119aba6432a80f73a608557d5531de92c52
Kernel: Allow killing queued threads
We need to dequeue and wake threads that are waiting if the process terminates.

Fixes #3603 without the HackStudio fixes in #3606.
-rw-r--r--  Kernel/Thread.cpp  25
-rw-r--r--  Kernel/Thread.h     7
2 files changed, 26 insertions, 6 deletions
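
The sketch below is not kernel code; it is a minimal, self-contained C++ model of the pattern this commit introduces, under the assumption that a std::mutex can stand in for g_scheduler_lock and a std::condition_variable for the scheduler's queue/wake machinery (SimpleWaitQueue, SimpleThread and the other names are invented for the example). It mirrors the three pieces of the diff: the waiter records which queue it is on while holding the lock, the kill path dequeues and wakes it under the same lock, and the waiter tells a timeout apart from a kill by checking whether it is still registered on the queue.

// Hypothetical, simplified model of the commit's idea (not SerenityOS code).
// The "scheduler lock" is a std::mutex; queueing/waking is modeled with a
// std::condition_variable.
#include <algorithm>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

enum class BlockResult { WokeNormally, InterruptedByTimeout, InterruptedByDeath };

struct SimpleThread;

struct SimpleWaitQueue {
    std::vector<SimpleThread*> waiters; // protected by the "scheduler" lock
    void enqueue(SimpleThread& t) { waiters.push_back(&t); }
    bool dequeue(SimpleThread& t)
    {
        auto it = std::find(waiters.begin(), waiters.end(), &t);
        if (it == waiters.end())
            return false;
        waiters.erase(it);
        return true;
    }
};

std::mutex g_sched_lock;        // stands in for g_scheduler_lock
std::condition_variable g_wake; // stands in for wake_from_queue()

struct SimpleThread {
    SimpleWaitQueue* m_queue { nullptr }; // only touched with g_sched_lock held
    bool m_woken { false };

    // Analogue of Thread::wait_on(): register on the queue, sleep, then decide
    // whether we timed out or were killed while queued.
    BlockResult wait_on(SimpleWaitQueue& queue, std::chrono::milliseconds timeout)
    {
        std::unique_lock lock(g_sched_lock);
        m_queue = &queue; // record the queue while holding the lock
        queue.enqueue(*this);

        g_wake.wait_for(lock, timeout, [&] { return m_woken; });

        if (m_queue) {
            assert(m_queue == &queue);
            m_queue = nullptr;
            // Still registered: nobody dequeued us, so we either timed out
            // (dequeue succeeds) or were woken normally.
            if (queue.dequeue(*this))
                return BlockResult::InterruptedByTimeout;
            return BlockResult::WokeNormally;
        }
        // Someone cleared m_queue for us; in this model that only happens on
        // the kill path below.
        return BlockResult::InterruptedByDeath;
    }

    // Analogue of the new branch in Thread::set_should_die(): if the thread is
    // queued, pull it off the queue and wake it.
    void kill()
    {
        std::lock_guard lock(g_sched_lock);
        if (m_queue) {
            m_queue->dequeue(*this);
            m_queue = nullptr;
            m_woken = true;
            g_wake.notify_all(); // wake the thread
        }
    }
};

int main()
{
    SimpleWaitQueue queue;
    SimpleThread waiter;

    std::thread t([&] {
        auto result = waiter.wait_on(queue, std::chrono::seconds(5));
        std::printf("woke with result %d\n", static_cast<int>(result)); // expected: InterruptedByDeath
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    waiter.kill(); // "process terminates": dequeue and wake the waiter
    t.join();
}

In the real kernel the waking is done by wake_from_queue() and the scheduler; the condition variable here only models that hand-off.
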
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 2e9fe1ac3d..caa5300133 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -151,6 +151,14 @@ void Thread::set_should_die()
// the kernel stacks can clean up. We won't ever return back
// to user mode, though
resume_from_stopped();
+ } else if (state() == Queued) {
+ // m_queue can only be accessed safely if g_scheduler_lock is held!
+ if (m_queue) {
+ m_queue->dequeue(*this);
+ m_queue = nullptr;
+ // Wake the thread
+ wake_from_queue();
+ }
}
}
@@ -973,6 +981,8 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// we need to wait until the scheduler lock is released again
{
ScopedSpinLock sched_lock(g_scheduler_lock);
+ // m_queue can only be accessed safely if g_scheduler_lock is held!
+ m_queue = &queue;
if (!queue.enqueue(*current_thread)) {
// The WaitQueue was already requested to wake someone when
// nobody was waiting. So return right away as we shouldn't
@@ -1026,9 +1036,18 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// scheduler lock, which is held when we insert into the queue
ScopedSpinLock sched_lock(g_scheduler_lock);
- // If our thread was still in the queue, we timed out
- if (queue.dequeue(*current_thread))
- result = BlockResult::InterruptedByTimeout;
+ if (m_queue) {
+ ASSERT(m_queue == &queue);
+ // If our thread was still in the queue, we timed out
+ m_queue = nullptr;
+ if (queue.dequeue(*current_thread))
+ result = BlockResult::InterruptedByTimeout;
+ } else {
+ // Our thread was already removed from the queue. The only
+ // way this can happen is if someone else is trying to kill us.
+ // In this case, the queue should not contain us anymore.
+ return BlockResult::InterruptedByDeath;
+ }
// Make sure we cancel the timer if we woke normally.
if (timeout && !result.was_interrupted())
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index f3d57c6fff..d5cd017614 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -438,12 +438,12 @@ public:
// to clean up now while we're still holding m_lock
t.was_unblocked();
- if (t.was_interrupted_by_signal())
- return BlockResult::InterruptedBySignal;
-
if (t.was_interrupted_by_death())
return BlockResult::InterruptedByDeath;
+ if (t.was_interrupted_by_signal())
+ return BlockResult::InterruptedBySignal;
+
return BlockResult::WokeNormally;
}
@@ -634,6 +634,7 @@ private:
Blocker* m_blocker { nullptr };
timespec* m_blocker_timeout { nullptr };
const char* m_wait_reason { nullptr };
+ WaitQueue* m_queue { nullptr };
Atomic<bool> m_is_active { false };
bool m_is_joinable { true };
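
One closing note on the Kernel/Thread.h hunk: the unblock checks are reordered so that death takes priority over a pending signal. The fragment below is a hypothetical caller-side sketch (do_blocking_call, handle_pending_signals and unwind_and_die are invented stubs, not kernel APIs) illustrating why the order matters: a thread that was killed while it also had a signal pending should unwind instead of looping back into another block.

// Hypothetical caller of a block()-style API; not SerenityOS code.
#include <cstdio>
#include <cstdlib>

enum class BlockResult { WokeNormally, InterruptedBySignal, InterruptedByDeath, InterruptedByTimeout };

// Stubs standing in for kernel machinery: the blocking primitive, the signal
// dispatcher, and thread teardown.
static BlockResult do_blocking_call() { return BlockResult::InterruptedByDeath; }
static void handle_pending_signals() { std::puts("dispatching signals"); }
[[noreturn]] static void unwind_and_die() { std::puts("thread dying"); std::exit(0); }

int main()
{
    for (;;) {
        switch (do_blocking_call()) {
        case BlockResult::InterruptedByDeath:
            // Checked first (mirroring the reordered checks in Thread.h): even
            // if a signal is also pending, a dying thread must not loop back
            // into another block.
            unwind_and_die();
        case BlockResult::InterruptedBySignal:
            handle_pending_signals();
            continue; // retry the blocking call
        case BlockResult::InterruptedByTimeout:
        case BlockResult::WokeNormally:
            return 0;
        }
    }
}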