summaryrefslogtreecommitdiff
path: root/Kernel/Arch
diff options
context:
space:
mode:
authorTom <tomut@yahoo.com>2020-07-08 15:20:36 -0600
committerAndreas Kling <kling@serenityos.org>2020-07-09 23:24:55 +0200
commitb02d33bd63e1bff476d6024746434c1325081e7d (patch)
tree3e575edb4627e22edd129172f8c3561d4acc6076 /Kernel/Arch
parent5d9ea2c787705964319bdd734560ab2c1181e696 (diff)
downloadserenity-b02d33bd63e1bff476d6024746434c1325081e7d.zip
Kernel: Fix some flaws that caused crashes or hangs during boot
We need to halt the BSP briefly until all APs are ready for the first context switch, but we can't hold the same spinlock by all of them while doing so. So, while the APs are waiting on each other they need to release the scheduler lock, and then once signaled re-acquire it. Should solve some timing-dependent hangs or crashes, most easily observed using qemu with kvm disabled.
Diffstat (limited to 'Kernel/Arch')
-rw-r--r--Kernel/Arch/i386/CPU.cpp27
1 files changed, 24 insertions, 3 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 88adf2f018..b30ddbba8d 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -990,8 +990,6 @@ void Processor::initialize(u32 cpu)
if (cpu >= s_processors->size())
s_processors->resize(cpu + 1);
(*s_processors)[cpu] = this;
-
- klog() << "CPU[" << cpu << "]: initialized Processor at " << VirtualAddress(FlatPtr(this));
}
}
@@ -1344,6 +1342,27 @@ void Processor::assume_context(Thread& thread, u32 flags)
ASSERT_NOT_REACHED();
}
+extern "C" void pre_init_finished(void)
+{
+ ASSERT(g_scheduler_lock.own_lock());
+
+ // The target flags will get restored upon leaving the trap
+ u32 prev_flags = cpu_flags();
+ g_scheduler_lock.unlock(prev_flags);
+
+ // Because init_finished() will wait on the other APs, we need
+ // to release the scheduler lock so that the other APs can also get
+ // to this point
+}
+
+extern "C" void post_init_finished(void)
+{
+ // We need to re-acquire the scheduler lock before a context switch
+ // transfers control into the idle loop, which needs the lock held
+ ASSERT(!g_scheduler_lock.own_lock());
+ g_scheduler_lock.lock();
+}
+
void Processor::initialize_context_switching(Thread& initial_thread)
{
ASSERT(initial_thread.process().is_ring0());
@@ -1368,9 +1387,11 @@ void Processor::initialize_context_switching(Thread& initial_thread)
"addl $20, %%ebx \n" // calculate pointer to TrapFrame
"pushl %%ebx \n"
"cld \n"
- "pushl %[cpu] \n"
+ "pushl %[cpu] \n" // push argument for init_finished before register is clobbered
+ "call pre_init_finished \n"
"call init_finished \n"
"addl $4, %%esp \n"
+ "call post_init_finished \n"
"call enter_trap_no_irq \n"
"addl $4, %%esp \n"
"lret \n"