summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Tom <tomut@yahoo.com>                      2020-08-14 10:24:31 -0600
committer Andreas Kling <kling@serenityos.org>       2020-08-15 00:15:00 +0200
commit    72960fedc60f12a8f34366557fec808c8ce82f12 (patch)
tree      d343bd7ee59c63b81e096420bac395c0eab658b5
parent    2614ef550c7b94dbe7c7e0b96622380d1186aea1 (diff)
download  serenity-72960fedc60f12a8f34366557fec808c8ce82f12.zip
Kernel: Briefly resume stopped threads when being killed
We need to briefly put Stopped threads back into Running state so that the kernel stacks can get cleaned up when they're being killed. Fixes #3130
-rw-r--r--  Kernel/Thread.cpp  44
-rw-r--r--  Kernel/Thread.h     2
2 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 31b409dd47..6a4f9c4f55 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -136,7 +136,21 @@ void Thread::set_should_die()
// Remember that we should die instead of returning to
// the userspace.
- m_should_die = true;
+ {
+ ScopedSpinLock lock(g_scheduler_lock);
+ m_should_die = true;
+
+ // NOTE: Even the current thread can technically be in "Stopped"
+ // state! This is the case when another thread sent a SIGSTOP to
+ // it while it was running and it calls e.g. exit() before
+ // the scheduler gets involved again.
+ if (is_stopped()) {
+ // If we were stopped, we need to briefly resume so that
+ // the kernel stacks can clean up. We won't ever return back
+ // to user mode, though
+ resume_from_stopped();
+ }
+ }
if (is_blocked()) {
ScopedSpinLock lock(m_lock);
@@ -428,6 +442,20 @@ static void push_value_on_user_stack(u32* stack, u32 data)
copy_to_user((u32*)*stack, &data);
}
+void Thread::resume_from_stopped()
+{
+ ASSERT(is_stopped());
+ ASSERT(m_stop_state != State::Invalid);
+ set_state(m_stop_state);
+ m_stop_state = State::Invalid;
+ // make sure SemiPermanentBlocker is unblocked
+ if (m_state != Thread::Runnable && m_state != Thread::Running) {
+ ScopedSpinLock lock(m_lock);
+ if (m_blocker && m_blocker->is_reason_signal())
+ unblock();
+ }
+}
+
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
ASSERT_INTERRUPTS_DISABLED();
@@ -455,18 +483,8 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
}
if (signal == SIGCONT && is_stopped()) {
- ASSERT(m_stop_state != State::Invalid);
- set_state(m_stop_state);
- m_stop_state = State::Invalid;
- // make sure SemiPermanentBlocker is unblocked
- if (m_state != Thread::Runnable && m_state != Thread::Running) {
- ScopedSpinLock lock(m_lock);
- if (m_blocker && m_blocker->is_reason_signal())
- unblock();
- }
- }
-
- else {
+ resume_from_stopped();
+ } else {
auto* thread_tracer = tracer();
if (thread_tracer != nullptr) {
// when a thread is traced, it should be stopped whenever it receives a signal
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 75d13371b7..3ab9fb3ca9 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -270,6 +270,8 @@ public:
void did_schedule() { ++m_times_scheduled; }
u32 times_scheduled() const { return m_times_scheduled; }
+ void resume_from_stopped();
+
bool is_stopped() const { return m_state == Stopped; }
bool is_blocked() const { return m_state == Blocked; }
bool has_blocker() const