author      kleines Filmröllchen <filmroellchen@serenityos.org>    2022-06-17 23:58:01 +0200
committer   Linus Groh <mail@linusgroh.de>                         2022-07-19 12:12:13 +0100
commit      d0e614c04553489332b26624d4cc5218c43df60d (patch)
tree        d689403536667aad14019623fbe446376a88052f /Kernel
parent      c517dfde12e92575d6f68de3784447d85b6f0501 (diff)
download    serenity-d0e614c04553489332b26624d4cc5218c43df60d.zip
Kernel: Don't check that interrupts are enabled during early boot
The interrupts-enabled check in the Kernel mutex is there so that we don't lock mutexes within a spinlock, because mutexes reenable interrupts and that will mess up the spinlock in more ways than one if the thread moves processors. This check is guarded behind a debug flag because it's too hard to fix all the problems at once, but we regressed and weren't even getting to init stage 2 with it enabled.

With this commit, we get to stage 2 again. In early boot, interrupts are disabled and no spinlocks are in use, so we can more or less safely ignore the interrupt state. There might be a better solution with another boot state flag that checks whether the APs are up (because they have interrupts enabled from the start), but that seems overkill.
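For context on where g_in_early_boot comes from (the diff below only declares it extern), here is a minimal sketch of how such a boot-state flag could be defined and cleared. The definition site, the helper name finish_early_boot, and the exact point at which the flag is flipped are assumptions for illustration and are not part of this patch.

// Sketch only, not part of this diff: a boot-state flag owned by the kernel's
// init code. Where it lives and when it is cleared are assumptions.
bool g_in_early_boot { true };

// Hypothetical helper: called once the BSP is about to enable interrupts and
// start scheduling. From this point on (and once the APs, which run with
// interrupts enabled from the start, come up), the LOCK_IN_CRITICAL_DEBUG
// check in Mutex goes back to requiring that interrupts are enabled.
void finish_early_boot()
{
    g_in_early_boot = false;
}

With a flag like this in place, the relaxed checks added to lock(), unlock(), and block() below only skip the interrupt verification while early boot is still in progress.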
Diffstat (limited to 'Kernel')
-rw-r--r--    Kernel/Locking/Mutex.cpp    23
1 file changed, 17 insertions, 6 deletions
diff --git a/Kernel/Locking/Mutex.cpp b/Kernel/Locking/Mutex.cpp
index 849fd6e25e..99f836a077 100644
--- a/Kernel/Locking/Mutex.cpp
+++ b/Kernel/Locking/Mutex.cpp
@@ -12,6 +12,8 @@
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Thread.h>
+extern bool g_in_early_boot;
+
namespace Kernel {
void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
@@ -19,8 +21,11 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
// NOTE: This may be called from an interrupt handler (not an IRQ handler)
// and also from within critical sections!
VERIFY(!Processor::current_in_irq());
- if constexpr (LOCK_IN_CRITICAL_DEBUG)
- VERIFY_INTERRUPTS_ENABLED();
+ if constexpr (LOCK_IN_CRITICAL_DEBUG) {
+ // There are no interrupts enabled in early boot.
+ if (!g_in_early_boot)
+ VERIFY_INTERRUPTS_ENABLED();
+ }
VERIFY(mode != Mode::Unlocked);
auto* current_thread = Thread::current();
@@ -147,9 +152,12 @@ void Mutex::unlock()
{
// NOTE: This may be called from an interrupt handler (not an IRQ handler)
// and also from within critical sections!
- if constexpr (LOCK_IN_CRITICAL_DEBUG)
- VERIFY_INTERRUPTS_ENABLED();
VERIFY(!Processor::current_in_irq());
+ if constexpr (LOCK_IN_CRITICAL_DEBUG) {
+ // There are no interrupts enabled in early boot.
+ if (!g_in_early_boot)
+ VERIFY_INTERRUPTS_ENABLED();
+ }
auto* current_thread = Thread::current();
SpinlockLocker lock(m_lock);
Mode current_mode = m_mode;
@@ -205,8 +213,11 @@ void Mutex::unlock()
void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& lock, u32 requested_locks)
{
- if constexpr (LOCK_IN_CRITICAL_DEBUG)
- VERIFY_INTERRUPTS_ENABLED();
+ if constexpr (LOCK_IN_CRITICAL_DEBUG) {
+ // There are no interrupts enabled in early boot.
+ if (!g_in_early_boot)
+ VERIFY_INTERRUPTS_ENABLED();
+ }
m_blocked_thread_lists.with([&](auto& lists) {
auto append_to_list = [&]<typename L>(L& list) {
VERIFY(!list.contains(current_thread));