summary refs log tree commit diff
path: root/Kernel
diff options
context:
space:
mode:
author: Andreas Kling <awesomekling@gmail.com> 2020-01-01 16:49:08 +0100
committer: Andreas Kling <awesomekling@gmail.com> 2020-01-01 16:54:21 +0100
commit: fd740829d1976c7da2372b60e36d1ae569d6148d (patch)
tree: 2af7a01e9885170180749b55b223ea2ce08cb136 /Kernel
parent: 9c0836ce97ae36165abd8eb5241bb5239af3a756 (diff)
download: serenity-fd740829d1976c7da2372b60e36d1ae569d6148d.zip
Kernel: Switch to eagerly restoring x86 FPU state on context switch
Lazy FPU restore is well known to be vulnerable to timing attacks, and eager restore is a lot simpler anyway, so let's just do it eagerly.
Diffstat (limited to 'Kernel')
-rw-r--r--Kernel/Arch/i386/CPU.cpp29
-rw-r--r--Kernel/Scheduler.cpp7
-rw-r--r--Kernel/Scheduler.h1
-rw-r--r--Kernel/Thread.cpp14
-rw-r--r--Kernel/Thread.h3
5 files changed, 15 insertions, 39 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 4eb17dc968..cabb15d40f 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -205,34 +205,11 @@ void general_protection_fault_handler(RegisterDump regs)
// 7: FPU not available exception
EH_ENTRY_NO_CODE(7, fpu_exception);
-void fpu_exception_handler(RegisterDump regs)
+void fpu_exception_handler(RegisterDump)
{
- (void)regs;
-
+ // Just clear the TS flag. We've already restored the FPU state eagerly.
+ // FIXME: It would be nice if we didn't have to do this at all.
asm volatile("clts");
- if (g_last_fpu_thread == current)
- return;
- if (g_last_fpu_thread) {
- asm volatile("fxsave %0"
- : "=m"(g_last_fpu_thread->fpu_state()));
- } else {
- asm volatile("fnclex");
- }
- g_last_fpu_thread = current;
-
- if (current->has_used_fpu()) {
- asm volatile("fxrstor %0" ::"m"(current->fpu_state()));
- } else {
- asm volatile("fninit");
- asm volatile("fxsave %0"
- : "=m"(current->fpu_state()));
- current->set_has_used_fpu(true);
- }
-
-#ifdef FPU_EXCEPTION_DEBUG
- kprintf("%s FPU not available exception: %u(%s)\n", current->process().is_ring0() ? "Kernel" : "Process", current->pid(), current->process().name().characters());
- dump(regs);
-#endif
}
// 14: Page Fault
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 0f270dd2ff..9f593ac297 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -39,7 +39,6 @@ static u32 time_slice_for(const Thread& thread)
}
Thread* current;
-Thread* g_last_fpu_thread;
Thread* g_finalizer;
Thread* g_colonel;
WaitQueue* g_finalizer_wait_queue;
@@ -376,7 +375,6 @@ bool Scheduler::pick_next()
}
}
-
if (!thread_to_schedule)
thread_to_schedule = g_colonel;
@@ -457,6 +455,9 @@ bool Scheduler::context_switch(Thread& thread)
if (current->state() == Thread::Running)
current->set_state(Thread::Runnable);
+ asm volatile("fxsave %0"
+ : "=m"(current->fpu_state()));
+
#ifdef LOG_EVERY_CONTEXT_SWITCH
dbgprintf("Scheduler: %s(%u:%u) -> %s(%u:%u) [%u] %w:%x\n",
current->process().name().characters(), current->process().pid(), current->tid(),
@@ -469,6 +470,8 @@ bool Scheduler::context_switch(Thread& thread)
current = &thread;
thread.set_state(Thread::Running);
+ asm volatile("fxrstor %0" ::"m"(current->fpu_state()));
+
if (!thread.selector()) {
thread.set_selector(gdt_alloc_entry());
auto& descriptor = get_gdt_entry(thread.selector());
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index 25841c4030..ec46c117ef 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -12,7 +12,6 @@ struct RegisterDump;
struct SchedulerData;
extern Thread* current;
-extern Thread* g_last_fpu_thread;
extern Thread* g_finalizer;
extern Thread* g_colonel;
extern WaitQueue* g_finalizer_wait_queue;
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 30c0057a58..342a42878b 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -10,6 +10,8 @@
//#define SIGNAL_DEBUG
+static FPUState s_clean_fpu_state;
+
u16 thread_specific_selector()
{
static u16 selector;
@@ -55,7 +57,7 @@ Thread::Thread(Process& process)
dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
set_default_signal_dispositions();
m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
- memset(m_fpu_state, 0, sizeof(FPUState));
+ memcpy(m_fpu_state, &s_clean_fpu_state, sizeof(FPUState));
memset(&m_tss, 0, sizeof(m_tss));
// Only IF is set when a process boots.
@@ -119,9 +121,6 @@ Thread::~Thread()
thread_table().remove(this);
}
- if (g_last_fpu_thread == this)
- g_last_fpu_thread = nullptr;
-
if (selector())
gdt_free_entry(selector());
@@ -625,8 +624,7 @@ u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vecto
}
env[environment.size()] = nullptr;
- auto push_on_new_stack = [&new_esp](u32 value)
- {
+ auto push_on_new_stack = [&new_esp](u32 value) {
new_esp -= 4;
u32* stack_ptr = (u32*)new_esp;
*stack_ptr = value;
@@ -646,7 +644,6 @@ Thread* Thread::clone(Process& process)
memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
clone->m_signal_mask = m_signal_mask;
memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
- clone->m_has_used_fpu = m_has_used_fpu;
clone->m_thread_specific_data = m_thread_specific_data;
return clone;
}
@@ -654,6 +651,9 @@ Thread* Thread::clone(Process& process)
void Thread::initialize()
{
Scheduler::initialize();
+ asm volatile("fninit");
+ asm volatile("fxsave %0"
+ : "=m"(s_clean_fpu_state));
}
Vector<Thread*> Thread::all_threads()
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index ff1a4d8659..cd2beb3a4d 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -329,8 +329,6 @@ public:
bool has_signal_handler(u8 signal) const;
FPUState& fpu_state() { return *m_fpu_state; }
- bool has_used_fpu() const { return m_has_used_fpu; }
- void set_has_used_fpu(bool b) { m_has_used_fpu = b; }
void set_default_signal_dispositions();
void push_value_on_stack(u32);
@@ -457,7 +455,6 @@ private:
u32 m_priority { THREAD_PRIORITY_NORMAL };
u32 m_extra_priority { 0 };
u32 m_priority_boost { 0 };
- bool m_has_used_fpu { false };
bool m_dump_backtrace_on_finalization { false };
bool m_should_die { false };