summaryrefslogtreecommitdiff
path: root/Kernel/Scheduler.cpp
diff options
context:
space:
mode:
author: Tom <tomut@yahoo.com> 2020-08-10 14:05:24 -0600
committer: Andreas Kling <kling@serenityos.org> 2020-08-11 14:54:36 +0200
commit: 49d5232f3350453cf45846580e506d6c79da10ba (patch)
tree: ded26fb0dcde08d3a4eefd09b80607d8d900c772 /Kernel/Scheduler.cpp
parent: 1f7190d3bdb4dea524a98786554a5314b75e5c72 (diff)
downloadserenity-49d5232f3350453cf45846580e506d6c79da10ba.zip
Kernel: Always return from Thread::wait_on
We need to always return from Thread::wait_on, even when a thread is being killed. This is necessary so that the kernel call stack can clean up and release references held by it. Then, right before transitioning back to user mode, we check if the thread is supposed to die, and at that point change the thread state to Dying to prevent further scheduling of this thread. This addresses some possible resource leaks similar to #3073
Diffstat (limited to 'Kernel/Scheduler.cpp')
-rw-r--r-- Kernel/Scheduler.cpp | 15
1 file changed, 15 insertions, 0 deletions
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index dcfdd4074c..7d0a477903 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -365,6 +365,21 @@ bool Scheduler::pick_next()
ScopedSpinLock lock(g_scheduler_lock);
+ if (current_thread->should_die() && current_thread->state() == Thread::Running) {
+ // Rather than immediately killing threads, yanking the kernel stack
+ // away from them (which can lead to e.g. reference leaks), we always
+ // allow Thread::wait_on to return. This allows the kernel stack to
+ // clean up and eventually we'll get here shortly before transitioning
+ // back to user mode (from Processor::exit_trap). At this point we
+ // no longer want to schedule this thread. We can't wait until
+ // Scheduler::enter_current because we don't want to allow it to
+ // transition back to user mode.
+#ifdef SCHEDULER_DEBUG
+ dbg() << "Scheduler[" << Processor::current().id() << "]: Thread " << *current_thread << " is dying";
+#endif
+ current_thread->set_state(Thread::Dying);
+ }
+
// Check and unblock threads whose wait conditions have been met.
Scheduler::for_each_nonrunnable([&](Thread& thread) {
thread.consider_unblock(now_sec, now_usec);