author     Tom <tomut@yahoo.com>  2021-07-13 10:11:33 -0600
committer  Andreas Kling <kling@serenityos.org>  2021-07-13 20:23:10 +0200
commit     fa8fe40266f2cd631b97e8155bdad2486c31f437 (patch)
tree       dafc979c2b05c115e68b5d0a4acfe690110c1751 /Kernel
parent     552185066ed836eef38f1f9088c9482854dcf70a (diff)
download   serenity-fa8fe40266f2cd631b97e8155bdad2486c31f437.zip
Revert "Kernel: Make sure threads which don't do any syscalls are t..."
This reverts commit 3c3a1726df847aff9db73862040d9f7a3b9fc907.

We cannot blindly kill threads just because they're not executing in a
system call. Being blocked (including in a page fault) needs proper
unblocking and potentially kernel stack cleanup before we can mark a
thread as Dying.

Fixes #8691
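As background for the reasoning above, here is a minimal, self-contained sketch of the hazard being described. The types and functions below are illustrative only (they are not the SerenityOS Thread/Scheduler API): forcing a blocked thread straight into a Dying state skips the unblocking and kernel-stack cleanup the thread still needs.

// Illustrative sketch only -- hypothetical names, not SerenityOS kernel code.
#include <cstdio>

enum class State { Runnable, Running, Blocked, Dying };

struct Thread {
    State state { State::Runnable };
    bool should_die { false };
};

// What the reverted shortcut effectively did: mark the thread Dying regardless of state.
void mark_dying_blindly(Thread& t)
{
    if (t.should_die)
        t.state = State::Dying; // unsafe if t.state == Blocked: blocker and kernel stack are never cleaned up
}

// The shape the commit message argues for: a blocked thread must first be
// unblocked so it can unwind and clean up, and only then become Dying.
void mark_dying_safely(Thread& t)
{
    if (!t.should_die)
        return;
    if (t.state == State::Blocked)
        return; // a real kernel would unblock the thread here instead of killing it in place
    t.state = State::Dying;
}

int main()
{
    Thread t;
    t.state = State::Blocked;
    t.should_die = true;
    mark_dying_blindly(t);
    std::printf("blindly: dying=%d (cleanup was skipped)\n", t.state == State::Dying);
}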
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Scheduler.cpp  7
-rw-r--r--  Kernel/Syscall.cpp    9
-rw-r--r--  Kernel/Thread.h       3
3 files changed, 0 insertions, 19 deletions
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 2f21bfe111..da69b27cd7 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -217,13 +217,6 @@ bool Scheduler::pick_next()
     ScopedSpinLock lock(g_scheduler_lock);
-    auto current_thread = Thread::current();
-    if (current_thread->should_die() && current_thread->may_die_immediately()) {
-        // Ordinarily the thread would die on syscall exit, however if the thread
-        // doesn't perform any syscalls we still need to mark it for termination here.
-        current_thread->set_state(Thread::Dying);
-    }
-
     if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
         dump_thread_list();
     }
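With the hunk above reverted, pick_next() no longer promotes threads to Dying; a thread that has been asked to die is again handled only at a point where its kernel state is known to be clean, such as the system-call exit path. The sketch below is a hypothetical illustration of that flow, not the actual Scheduler/Thread code.

// Hypothetical illustration (not SerenityOS code): termination requests are
// honoured at a safe point, e.g. when the thread leaves a system call, where
// no blocker is active and the kernel stack is about to unwind anyway.
#include <cstdio>

struct Thread {
    bool should_die { false };
    bool dying { false };
};

void on_syscall_exit(Thread& current)
{
    // Safe point: the thread is not blocked and holds no kernel resources here.
    if (current.should_die)
        current.dying = true;
}

int main()
{
    Thread t;
    t.should_die = true; // e.g. another thread requested termination
    on_syscall_exit(t);  // the request is honoured at the next safe point
    std::printf("dying=%d\n", t.dying);
}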
diff --git a/Kernel/Syscall.cpp b/Kernel/Syscall.cpp
index 90799c9881..04950fb51f 100644
--- a/Kernel/Syscall.cpp
+++ b/Kernel/Syscall.cpp
@@ -4,7 +4,6 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
-#include <AK/ScopeGuard.h>
 #include <Kernel/API/Syscall.h>
 #include <Kernel/Arch/x86/Interrupts.h>
 #include <Kernel/Arch/x86/TrapFrame.h>
@@ -154,14 +153,6 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
 {
     auto& regs = *trap->regs;
     auto current_thread = Thread::current();
-    {
-        ScopedSpinLock lock(g_scheduler_lock);
-        current_thread->set_may_die_immediately(false);
-    }
-    ScopeGuard reset_may_die_immediately = [&current_thread] {
-        ScopedSpinLock lock(g_scheduler_lock);
-        current_thread->set_may_die_immediately(true);
-    };
     VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
     auto& process = current_thread->process();
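The block removed above relied on AK::ScopeGuard to restore the flag on every exit path of syscall_handler. As a point of reference, the snippet below is a minimal stand-in for that RAII idiom in plain C++; it is not the AK implementation, only the general pattern the removed code depended on.

// Minimal stand-in for the scope-guard idiom (cf. AK/ScopeGuard.h): the stored
// callable runs when the guard goes out of scope, on every return path, which
// is what made restoring set_may_die_immediately(true) reliable.
#include <cstdio>
#include <utility>

template<typename Callback>
class ScopeGuard {
public:
    explicit ScopeGuard(Callback callback)
        : m_callback(std::move(callback))
    {
    }
    ~ScopeGuard() { m_callback(); }

private:
    Callback m_callback;
};

int main()
{
    bool may_die_immediately = true;
    {
        may_die_immediately = false; // entering the "syscall"
        ScopeGuard guard { [&] { may_die_immediately = true; } };
        std::puts("inside the guarded scope");
    } // the guard restores the flag here, no matter how the scope was left
    std::printf("restored=%d\n", may_die_immediately);
}

Because the guard fires even on early returns, the flag could not be left stuck at false; the revert removes the flag entirely rather than this restore mechanism alone.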
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 1cb21a66a0..bb6f5914ee 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1187,8 +1187,6 @@ public:
     bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
     void set_profiling_suppressed() { m_is_profiling_suppressed = true; }
-    bool may_die_immediately() const { return m_may_die_immediately; }
-    void set_may_die_immediately(bool flag) { m_may_die_immediately = flag; }
     InodeIndex global_procfs_inode_index() const { return m_global_procfs_inode_index; }
 private:
@@ -1287,7 +1285,6 @@ private:
     Kernel::Lock* m_blocking_lock { nullptr };
     u32 m_lock_requested_count { 0 };
     IntrusiveListNode<Thread> m_blocked_threads_list_node;
-    bool m_may_die_immediately { true };
 #if LOCK_DEBUG
     struct HoldingLockInfo {