author     Andreas Kling <awesomekling@gmail.com>    2020-01-01 16:49:08 +0100
committer  Andreas Kling <awesomekling@gmail.com>    2020-01-01 16:54:21 +0100
commit     fd740829d1976c7da2372b60e36d1ae569d6148d (patch)
tree       2af7a01e9885170180749b55b223ea2ce08cb136 /Kernel/Scheduler.cpp
parent     9c0836ce97ae36165abd8eb5241bb5239af3a756 (diff)
Kernel: Switch to eagerly restoring x86 FPU state on context switch
Lazy FPU restore is well known to be vulnerable to timing attacks, and eager restore is a lot simpler anyway, so let's just do it eagerly.
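
For context: with lazy restore, the kernel marks the FPU unavailable on each context switch (CR0.TS) and defers the fxrstor until the incoming thread actually touches the FPU, trapping via #NM; whether that trap fires leaks timing information between threads (the LazyFP class of attacks). Eager restore simply saves and reloads the state on every switch. A minimal sketch of the eager scheme, assuming a hypothetical per-thread FPUState save area (names are illustrative, not the kernel's exact API):

    // Hypothetical sketch, not the actual SerenityOS declarations.
    // fxsave/fxrstor operate on a 512-byte region that must be
    // 16-byte aligned.
    struct alignas(16) FPUState {
        unsigned char buffer[512];
    };

    // Save the outgoing thread's x87/MMX/SSE state unconditionally,
    // then immediately load the incoming thread's state, so no #NM
    // (device-not-available) trap handling is needed.
    inline void switch_fpu_state(FPUState& outgoing, const FPUState& incoming)
    {
        asm volatile("fxsave %0" : "=m"(outgoing));
        asm volatile("fxrstor %0" : : "m"(incoming));
    }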
Diffstat (limited to 'Kernel/Scheduler.cpp')
-rw-r--r--    Kernel/Scheduler.cpp    7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 0f270dd2ff..9f593ac297 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -39,7 +39,6 @@ static u32 time_slice_for(const Thread& thread)
 }
 
 Thread* current;
-Thread* g_last_fpu_thread;
 Thread* g_finalizer;
 Thread* g_colonel;
 WaitQueue* g_finalizer_wait_queue;
@@ -376,7 +375,6 @@ bool Scheduler::pick_next()
         }
     }
-
     if (!thread_to_schedule)
         thread_to_schedule = g_colonel;
 
@@ -457,6 +455,9 @@ bool Scheduler::context_switch(Thread& thread)
     if (current->state() == Thread::Running)
         current->set_state(Thread::Runnable);
 
+    asm volatile("fxsave %0"
+                 : "=m"(current->fpu_state()));
+
 #ifdef LOG_EVERY_CONTEXT_SWITCH
     dbgprintf("Scheduler: %s(%u:%u) -> %s(%u:%u) [%u] %w:%x\n",
         current->process().name().characters(), current->process().pid(), current->tid(),
@@ -469,6 +470,8 @@ bool Scheduler::context_switch(Thread& thread)
     current = &thread;
     thread.set_state(Thread::Running);
 
+    asm volatile("fxrstor %0" ::"m"(current->fpu_state()));
+
     if (!thread.selector()) {
         thread.set_selector(gdt_alloc_entry());
         auto& descriptor = get_gdt_entry(thread.selector());
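
One detail the diff relies on but does not show: fxsave and fxrstor raise #GP if their 512-byte memory operand is not 16-byte aligned, so whatever storage fpu_state() returns has to guarantee that size and alignment (e.g. via alignas(16) on the save-area type, as in the sketch above).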