author     Tom <tomut@yahoo.com>                  2020-08-10 14:05:24 -0600
committer  Andreas Kling <kling@serenityos.org>  2020-08-11 14:54:36 +0200
commit     49d5232f3350453cf45846580e506d6c79da10ba (patch)
tree       ded26fb0dcde08d3a4eefd09b80607d8d900c772
parent     1f7190d3bdb4dea524a98786554a5314b75e5c72 (diff)
Kernel: Always return from Thread::wait_on
We need to always return from Thread::wait_on, even when a thread is being killed. This is necessary so that the kernel call stack can unwind and release any references it holds. Then, right before transitioning back to user mode, we check whether the thread is supposed to die, and only at that point change the thread state to Dying, preventing any further scheduling of this thread. This addresses some possible resource leaks similar to #3073.
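As a hedged, standalone illustration of that pattern (simplified C++; names like RefHolder and before_return_to_user_mode are hypothetical, not SerenityOS APIs): the RAII holder on the kernel stack is released because the wait returns normally, and the thread is only retired at the final checkpoint.

// Minimal sketch, not SerenityOS code: a thread flagged for death still
// returns from its wait so stack-held references are released; only the
// last checkpoint before user mode flips the state to Dying.
#include <cstdio>

enum class State { Running, Queued, Dying };

struct Thread {
    State state { State::Running };
    bool should_die { false };
};

// Hypothetical stand-in for a kernel resource held across a wait.
struct RefHolder {
    ~RefHolder() { std::printf("reference released\n"); }
};

// Stand-in for Thread::wait_on: even if the thread was told to die while
// blocked, we return so the caller's stack can unwind.
void wait_on(Thread& t)
{
    t.state = State::Queued;
    t.should_die = true;       // simulates set_should_die() from another thread
    t.state = State::Running;  // woken: back to Running, NOT Dying yet
}

void syscall_body(Thread& t)
{
    RefHolder ref;             // released when this frame unwinds
    wait_on(t);
}                              // ~RefHolder runs even though should_die is set

// Stand-in for the pre-usermode checkpoint (Scheduler::pick_next in this patch).
void before_return_to_user_mode(Thread& t)
{
    if (t.should_die && t.state == State::Running)
        t.state = State::Dying; // now the finalizer can clean the thread up
}

int main()
{
    Thread t;
    syscall_body(t);
    before_return_to_user_mode(t);
    std::printf("dying: %d\n", t.state == State::Dying);
}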
-rw-r--r--  Kernel/Process.cpp   |  4
-rw-r--r--  Kernel/Scheduler.cpp | 15
-rw-r--r--  Kernel/Thread.cpp    | 40
-rw-r--r--  Kernel/Thread.h      |  2
4 files changed, 28 insertions(+), 33 deletions(-)
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index cb5bd298c5..a6fe422550 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -273,10 +273,6 @@ void Process::kill_threads_except_self()
// At this point, we have no joiner anymore
thread.m_joiner = nullptr;
thread.set_should_die();
-
- if (thread.state() != Thread::State::Dead)
- thread.set_state(Thread::State::Dying);
-
return IterationDecision::Continue;
});
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index dcfdd4074c..7d0a477903 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -365,6 +365,21 @@ bool Scheduler::pick_next()
ScopedSpinLock lock(g_scheduler_lock);
+ if (current_thread->should_die() && current_thread->state() == Thread::Running) {
+ // Rather than immediately killing threads, yanking the kernel stack
+ // away from them (which can lead to e.g. reference leaks), we always
+ // allow Thread::wait_on to return. This allows the kernel stack to
+ // clean up and eventually we'll get here shortly before transitioning
+ // back to user mode (from Processor::exit_trap). At this point we
+ // no longer want to schedule this thread. We can't wait until
+ // Scheduler::enter_current because we don't want to allow it to
+ // transition back to user mode.
+#ifdef SCHEDULER_DEBUG
+ dbg() << "Scheduler[" << Processor::current().id() << "]: Thread " << *current_thread << " is dying";
+#endif
+ current_thread->set_state(Thread::Dying);
+ }
+
// Check and unblock threads whose wait conditions have been met.
Scheduler::for_each_nonrunnable([&](Thread& thread) {
thread.consider_unblock(now_sec, now_usec);
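For context on the hunk above, a minimal standalone sketch (hypothetical types; the real scheduler tracks many more states and holds g_scheduler_lock) of why performing the Dying transition in pick_next guarantees the thread is never selected again:

#include <vector>

enum class State { Runnable, Running, Dying };

struct Thread {
    int id;
    State state;
    bool should_die;
};

// Hypothetical pick_next: retire the current thread only at this checkpoint,
// after its kernel stack has already unwound, and never pick Dying threads.
Thread* pick_next(Thread* current, std::vector<Thread*>& threads)
{
    if (current->should_die && current->state == State::Running)
        current->state = State::Dying;

    for (auto* t : threads) {
        if (t->state == State::Dying)
            continue; // Dying threads are invisible to scheduling
        if (t->state == State::Runnable)
            return t;
    }
    return nullptr; // a real scheduler would fall back to an idle thread
}

int main()
{
    Thread a { 1, State::Running, true };   // current thread, told to die
    Thread b { 2, State::Runnable, false };
    std::vector<Thread*> threads { &a, &b };
    return pick_next(&a, threads) == &b ? 0 : 1; // a is now Dying; b is chosen
}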
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 196c6befa4..ac654db1b2 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -117,17 +117,11 @@ void Thread::unblock()
ASSERT(m_lock.own_lock());
m_blocker = nullptr;
if (Thread::current() == this) {
- if (m_should_die)
- set_state(Thread::Dying);
- else
- set_state(Thread::Running);
+ set_state(Thread::Running);
return;
}
ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
- if (m_should_die)
- set_state(Thread::Dying);
- else
- set_state(Thread::Runnable);
+ set_state(Thread::Runnable);
}
void Thread::set_should_die()
@@ -150,12 +144,6 @@ void Thread::set_should_die()
// We're blocked in the kernel.
m_blocker->set_interrupted_by_death();
unblock();
- } else {
- // We're executing in userspace (and we're clearly
- // not the current thread). No need to unwind, so
- // set the state to dying right away. This also
- // makes sure we won't be scheduled anymore.
- set_state(Thread::State::Dying);
}
}
@@ -169,7 +157,7 @@ void Thread::die_if_needed()
unlock_process_if_locked();
ScopedCritical critical;
- set_state(Thread::State::Dying);
+ set_should_die();
// Flag a context switch. Because we're in a critical section,
// Scheduler::yield will actually only mark a pending context switch
@@ -715,13 +703,7 @@ void Thread::set_state(State new_state)
}
if (m_state == Dying) {
- if (previous_state == Queued) {
- // Holding the scheduler lock, we need to dequeue this thread
- ASSERT(m_wait_queue != nullptr);
- m_wait_queue->dequeue(*this);
- m_wait_queue = nullptr;
- }
-
+ ASSERT(previous_state != Queued);
if (this != Thread::current() && is_finalizable()) {
// Some other thread set this thread to Dying, notify the
// finalizer right away as it can be cleaned up now
@@ -886,7 +868,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
return BlockResult::NotBlocked;
}
- current_thread->m_wait_queue = &queue;
did_unlock = unlock_process_if_locked();
if (lock)
@@ -923,6 +904,14 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// This looks counterproductive, but we may not actually leave
// the critical section we just restored. It depends on whether
// we were in one while being called.
+ if (current_thread->should_die()) {
+ // We're being unblocked so that we can clean up. We shouldn't
+ // be in Dying state until we're about to return to user mode.
+ ASSERT(current_thread->state() == Thread::Running);
+#ifdef THREAD_DEBUG
+ dbg() << "Dying thread " << *current_thread << " was unblocked";
+#endif
+ }
}
BlockResult result(BlockResult::WokeNormally);
@@ -931,10 +920,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// scheduler lock, which is held when we insert into the queue
ScopedSpinLock sched_lock(g_scheduler_lock);
- // m_wait_queue should have been cleared either by the timeout
- // or by being woken
- ASSERT(m_wait_queue == nullptr);
-
// If our thread was still in the queue, we timed out
if (queue.dequeue(*current_thread))
result = BlockResult::InterruptedByTimeout;
@@ -955,7 +940,6 @@ void Thread::wake_from_queue()
ScopedSpinLock lock(g_scheduler_lock);
ASSERT(state() == State::Queued);
m_wait_reason = nullptr;
- m_wait_queue = nullptr;
if (this != Thread::current())
set_state(State::Runnable);
else
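The Thread.cpp hunks above also lean on one detail worth spelling out: with the m_wait_queue back-pointer gone, timeout detection rests entirely on whether the thread is still enqueued when wait_on resumes. A hedged standalone sketch with hypothetical types:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Thread { int id; };

// Hypothetical wait queue; the real WaitQueue operates under g_scheduler_lock.
struct WaitQueue {
    std::vector<Thread*> waiters;

    void enqueue(Thread& t) { waiters.push_back(&t); }

    // Returns true if the thread was still queued, i.e. no waker removed it.
    bool dequeue(Thread& t)
    {
        auto it = std::find(waiters.begin(), waiters.end(), &t);
        if (it == waiters.end())
            return false; // already removed by a normal wake
        waiters.erase(it);
        return true;
    }
};

enum class BlockResult { WokeNormally, InterruptedByTimeout };

// Mirrors the end of wait_on: still being enqueued after waking means the
// timer, not a waker, is what got us here.
BlockResult finish_wait(WaitQueue& queue, Thread& current)
{
    return queue.dequeue(current) ? BlockResult::InterruptedByTimeout
                                  : BlockResult::WokeNormally;
}

int main()
{
    Thread t { 1 };
    WaitQueue q;
    q.enqueue(t); // simulate a timeout: nobody dequeued us before we woke
    std::printf("timed out: %d\n", finish_wait(q, t) == BlockResult::InterruptedByTimeout);
}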
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 0252333d9e..75d13371b7 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -392,6 +392,7 @@ public:
// Tell this thread to unblock if needed,
// gracefully unwind the stack and die.
void set_should_die();
+ bool should_die() const { return m_should_die; }
void die_if_needed();
bool tick();
@@ -521,7 +522,6 @@ public:
private:
IntrusiveListNode m_runnable_list_node;
IntrusiveListNode m_wait_queue_node;
- WaitQueue* m_wait_queue { nullptr };
private:
friend class SchedulerData;
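Taken together, the header changes expose a small graceful-termination protocol. A simplified sketch of how the pieces are meant to fit (hypothetical, omitting the real locking and state machine):

// Simplified sketch, not the real Thread class: the flag is set by anyone via
// set_should_die(), observed via should_die() at safe checkpoints
// (Scheduler::pick_next, wait_on), and only those checkpoints transition the
// thread to Dying.
class Thread {
public:
    void set_should_die() { m_should_die = true; } // request graceful death
    bool should_die() const { return m_should_die; }

private:
    bool m_should_die { false };
};

int main()
{
    Thread t;
    t.set_should_die();
    return t.should_die() ? 0 : 1; // a scheduler checkpoint would now retire t
}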