author     Tom <tomut@yahoo.com>  2020-08-02 12:08:22 -0600
committer  Andreas Kling <kling@serenityos.org>  2020-08-02 20:50:29 +0200
commit     f011c420c16d4da5775e055ad96430bac1d96610 (patch)
tree       e5607555638c2244737f5518cbd4792ffe8d96a3
parent     6e54d0c072912dd59fdd92f7f28ef90ef21daa1b (diff)
download   serenity-f011c420c16d4da5775e055ad96430bac1d96610.zip
Kernel: Fix signal delivery when no syscall is made
This fixes a regression introduced by the new software context switching where the
Kernel would not deliver a signal unless the process was making system calls. This is
because the TSS no longer updates the CS value, so the scheduler never considered
delivering a signal: the process always appeared to be in kernel mode.

With software context switching we can just set up the signal trampoline at any time,
and when the processor returns back to user mode it will be executed. This should fix
e.g. killing programs that are stuck in some tight loop that doesn't make any system
calls and are only pre-empted by the timer interrupt.

Fixes #2958
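A minimal, self-contained sketch of the mechanism described above (not SerenityOS
code; RegisterState, the trampoline address, and queue_signal_on_saved_frame are
illustrative stand-ins). It models the idea that signal delivery only needs to rewrite
the thread's saved user-mode register frame: the next time the CPU returns to ring 3,
execution lands in the trampoline, with no syscall from the target thread required.

#include <cstdint>
#include <cstdio>

// Toy model of the registers saved for an interrupted user-mode thread.
struct RegisterState {
    uint32_t eip;           // where the thread would have resumed in userspace
    uint32_t userspace_esp; // its user stack pointer
};

// Stand-in for a kernel-provided "return to ring 3 from signal" trampoline address.
constexpr uint32_t trampoline_address = 0xdead0000;

// Redirect the saved frame so the trampoline runs on the next return to user mode.
void queue_signal_on_saved_frame(RegisterState& regs, uint8_t signal)
{
    // Make room on the user stack for the original return address; in a real kernel
    // the original eip (and more state) would be written to the user stack here.
    regs.userspace_esp -= sizeof(uint32_t);
    std::printf("saved original eip %#x for signal %u\n",
                regs.eip, static_cast<unsigned>(signal));
    // The interrupted thread now resumes in the trampoline instead of its old eip.
    regs.eip = trampoline_address;
}

int main()
{
    RegisterState regs { 0x08048123, 0xbfff0000 };
    queue_signal_on_saved_frame(regs, 15 /* SIGTERM */);
    std::printf("thread will resume at %#x with esp %#x\n",
                regs.eip, regs.userspace_esp);
}

Because the check only involves the saved frame, it works even for a thread spinning
in a tight userspace loop that is pre-empted solely by the timer interrupt, which is
exactly the case the TSS-based in_kernel() check got wrong.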
-rw-r--r--  Kernel/Process.cpp    2
-rw-r--r--  Kernel/Scheduler.cpp  11
-rw-r--r--  Kernel/Thread.cpp     34
-rw-r--r--  Kernel/Thread.h       2
4 files changed, 10 insertions, 39 deletions
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 03379463dd..e00d511f36 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -773,7 +773,7 @@ void Process::terminate_due_to_signal(u8 signal)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(signal < 32);
- dbg() << "Terminating due to signal " << signal;
+ dbg() << "Terminating " << *this << " due to signal " << signal;
m_termination_status = 0;
m_termination_signal = signal;
die();
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 5b2119d772..48ba933914 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -405,17 +405,6 @@ bool Scheduler::pick_next()
Thread::for_each_living([&](Thread& thread) -> IterationDecision {
if (!thread.has_unmasked_pending_signals())
return IterationDecision::Continue;
- // FIXME: It would be nice if the Scheduler didn't have to worry about who is "current"
- // For now, avoid dispatching signals to "current" and do it in a scheduling pass
- // while some other process is interrupted. Otherwise a mess will be made.
- if (&thread == current_thread)
- return IterationDecision::Continue;
- // We know how to interrupt blocked processes, but if they are just executing
- // at some random point in the kernel, let them continue.
- // Before returning to userspace from a syscall, we will block a thread if it has any
- // pending unmasked signals, allowing it to be dispatched then.
- if (thread.in_kernel() && !thread.is_blocked() && !thread.is_stopped())
- return IterationDecision::Continue;
// NOTE: dispatch_one_pending_signal() may unblock the process.
bool was_blocked = thread.is_blocked();
if (thread.dispatch_one_pending_signal() == ShouldUnblockThread::No)
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 00084c645c..6beac18097 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -144,12 +144,11 @@ void Thread::set_should_die()
m_should_die = true;
if (is_blocked()) {
- ASSERT(in_kernel());
ASSERT(m_blocker != nullptr);
// We're blocked in the kernel.
m_blocker->set_interrupted_by_death();
unblock();
- } else if (!in_kernel()) {
+ } else {
// We're executing in userspace (and we're clearly
// not the current thread). No need to unwind, so
// set the state to dying right away. This also
@@ -446,7 +445,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
ASSERT(!process().is_ring0());
#ifdef SIGNAL_DEBUG
- klog() << "dispatch_signal <- " << signal;
+ klog() << "signal: dispatch signal " << signal << " to " << *this;
#endif
auto& action = m_signal_action_data[signal];
@@ -518,7 +517,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
- klog() << "ignored signal " << signal;
+ klog() << "signal: " << *this << " ignored signal " << signal;
#endif
return ShouldUnblockThread::Yes;
}
@@ -572,31 +571,16 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
};
// We now place the thread state on the userspace stack.
- // Note that when we are in the kernel (ie. blocking) we cannot use the
- // tss, as that will contain kernel state; instead, we use a RegisterState.
+ // Note that we use a RegisterState.
// Conversely, when the thread isn't blocking the RegisterState may not be
// valid (fork, exec etc) but the tss will, so we use that instead.
- if (!in_kernel()) {
- u32* stack = &m_tss.esp;
- setup_stack(m_tss, stack);
-
- m_tss.cs = GDT_SELECTOR_CODE3 | 3;
- m_tss.ds = GDT_SELECTOR_DATA3 | 3;
- m_tss.es = GDT_SELECTOR_DATA3 | 3;
- m_tss.fs = GDT_SELECTOR_DATA3 | 3;
- m_tss.gs = GDT_SELECTOR_TLS | 3;
- m_tss.eip = g_return_to_ring3_from_signal_trampoline.get();
- // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
- set_state(Skip1SchedulerPass);
- } else {
- auto& regs = get_register_dump_from_stack();
- u32* stack = &regs.userspace_esp;
- setup_stack(regs, stack);
- regs.eip = g_return_to_ring3_from_signal_trampoline.get();
- }
+ auto& regs = get_register_dump_from_stack();
+ u32* stack = &regs.userspace_esp;
+ setup_stack(regs, stack);
+ regs.eip = g_return_to_ring3_from_signal_trampoline.get();
#ifdef SIGNAL_DEBUG
- klog() << "signal: Okay, {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip);
+ klog() << "signal: Okay, " << *this << " {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip) << " to deliver " << signal;
#endif
return ShouldUnblockThread::Yes;
}
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 354a9ccfa1..0dc7ea1444 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -274,8 +274,6 @@ public:
bool has_blocker() const { return m_blocker != nullptr; }
const Blocker& blocker() const;
- bool in_kernel() const { return (m_tss.cs & 0x03) == 0; }
-
u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
u32 affinity() const { return m_cpu_affinity; }