diff options
author | Sergey Bugaev <bugaevc@gmail.com> | 2019-11-14 18:46:01 +0300 |
---|---|---|
committer | Andreas Kling <awesomekling@gmail.com> | 2019-11-14 20:05:58 +0100 |
commit | 1e1ddce9d8f94dd220174755925cb8d7f03ef36a (patch) | |
tree | 8dec8c25e0711525c12bc59110e1f471ce039962 | |
parent | 00e56cda0c2e302d62f87f45e7cb24323cfa92d7 (diff) | |
download | serenity-1e1ddce9d8f94dd220174755925cb8d7f03ef36a.zip |
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to userspace, the thread
will automatically die.
-rw-r--r-- | Kernel/Process.cpp | 16 | ||||
-rw-r--r-- | Kernel/Syscall.cpp | 3 | ||||
-rw-r--r-- | Kernel/Thread.cpp | 40 | ||||
-rw-r--r-- | Kernel/Thread.h | 6 |
4 files changed, 58 insertions, 7 deletions
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp index 4de291cd4c..4095f29f8a 100644 --- a/Kernel/Process.cpp +++ b/Kernel/Process.cpp @@ -836,6 +836,7 @@ void Process::sys$exit(int status) m_termination_status = status; m_termination_signal = 0; die(); + current->die_if_needed(); ASSERT_NOT_REACHED(); } @@ -921,6 +922,7 @@ void Process::crash(int signal, u32 eip) { ASSERT_INTERRUPTS_DISABLED(); ASSERT(!is_dead()); + ASSERT(&current->process() == this); if (m_elf_loader && ksyms_ready) dbgprintf("\033[31;1m%p %s\033[0m\n", eip, m_elf_loader->symbolicate(eip).characters()); @@ -930,6 +932,9 @@ void Process::crash(int signal, u32 eip) dump_regions(); ASSERT(is_ring3()); die(); + // We can not return from here, as there is nowhere + // to unwind to, so die right away. + current->die_if_needed(); ASSERT_NOT_REACHED(); } @@ -2320,16 +2325,13 @@ void Process::die() m_tracer->set_dead(); { + // Tell the threads to unwind and die. InterruptDisabler disabler; for_each_thread([](Thread& thread) { - if (thread.state() != Thread::State::Dead) - thread.set_state(Thread::State::Dying); + thread.set_should_die(); return IterationDecision::Continue; }); } - - if (!Scheduler::is_active()) - Scheduler::pick_next_and_switch_now(); } size_t Process::amount_virtual() const @@ -2831,9 +2833,9 @@ void Process::sys$exit_thread(void* code) sys$exit(0); return; } - current->set_state(Thread::State::Dying); + current->set_should_die(); big_lock().unlock_if_locked(); - Scheduler::pick_next_and_switch_now(); + current->die_if_needed(); ASSERT_NOT_REACHED(); } diff --git a/Kernel/Syscall.cpp b/Kernel/Syscall.cpp index dd71cc9eaa..e86c075ad5 100644 --- a/Kernel/Syscall.cpp +++ b/Kernel/Syscall.cpp @@ -100,4 +100,7 @@ void syscall_trap_entry(RegisterDump regs) if (auto* tracer = process.tracer()) tracer->did_syscall(function, arg1, arg2, arg3, regs.eax); process.big_lock().unlock(); + + // Check if we're supposed to return to userspace or just die. 
+ current->die_if_needed(); } diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index 29d0ab25ed..5dd341d2a6 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -131,6 +131,46 @@ void Thread::unblock() set_state(Thread::Runnable); } +void Thread::set_should_die() +{ + if (m_should_die) + return; + InterruptDisabler disabler; + + // Remember that we should die instead of returning to + // the userspace. + m_should_die = true; + + if (is_blocked()) { + ASSERT(in_kernel()); + ASSERT(m_blocker != nullptr); + // We're blocked in the kernel. Pretend to have + // been interrupted by a signal (perhaps that is + // what has actually killed us). + m_blocker->set_interrupted_by_signal(); + unblock(); + } else if (!in_kernel()) { + // We're executing in userspace (and we're clearly + // not the current thread). No need to unwind, so + // set the state to dying right away. This also + // makes sure we won't be scheduled anymore. + set_state(Thread::State::Dying); + } +} + +void Thread::die_if_needed() +{ + ASSERT(current == this); + + if (!m_should_die) + return; + + InterruptDisabler disabler; + set_state(Thread::State::Dying); + if (!Scheduler::is_active()) + Scheduler::pick_next_and_switch_now(); +} + void Thread::block_helper() { // This function mostly exists to avoid circular header dependencies. If diff --git a/Kernel/Thread.h b/Kernel/Thread.h index da7d2bc248..3e2b55ed80 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -276,6 +276,11 @@ public: void unblock(); + // Tell this thread to unblock if needed, + // gracefully unwind the stack and die. + void set_should_die(); + void die_if_needed(); + const FarPtr& far_ptr() const { return m_far_ptr; } bool tick(); @@ -356,6 +361,7 @@ private: ThreadPriority m_priority { ThreadPriority::Normal }; bool m_has_used_fpu { false }; bool m_dump_backtrace_on_finalization { false }; + bool m_should_die { false }; void block_helper(); }; |