author    Timon Kruiper <timonkruiper@gmail.com>    2022-08-23 21:42:30 +0200
committer Andreas Kling <kling@serenityos.org>      2022-08-26 12:51:57 +0200
commit    e8aff0c1c88510190285578f6539ca411faedd67 (patch)
tree      155fbde0df2989d4f7d5a4a4296eb2fa4189ed4d /Kernel/Arch/x86
parent    6432f3eee8b21979c0a455008f0a8d2124ed15a7 (diff)
Kernel: Use InterruptsState in Spinlock code
This commit updates the lock functions of Spinlock and RecursiveSpinlock to return the processor's InterruptsState instead of the raw processor flags. The unlock functions only ever looked at the interrupt flag within those flags, so the InterruptsState enum both clarifies the intent and allows the same Spinlock code to be reused for the aarch64 build. All call sites are updated as well so the build keeps working.
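
For reference, a minimal sketch of the InterruptsState abstraction this patch relies on, assuming an x86 target where bit 9 of EFLAGS (0x200) is the interrupt flag (IF). The enum values and helper bodies below are illustrative; only the names used in the diff (processor_interrupts_state, restore_processor_interrupts_state, Processor::disable_interrupts, cpu_flags) come from the patch itself.

enum class InterruptsState {
    Enabled,
    Disabled,
};

inline InterruptsState processor_interrupts_state()
{
    // cpu_flags() reads the current EFLAGS; IF set means interrupts are enabled.
    return (cpu_flags() & 0x200) != 0 ? InterruptsState::Enabled : InterruptsState::Disabled;
}

inline void restore_processor_interrupts_state(InterruptsState previous_interrupts_state)
{
    // Assumption: an enable_interrupts() counterpart exists; on x86 these map to sti()/cli().
    if (previous_interrupts_state == InterruptsState::Enabled)
        Processor::enable_interrupts();
    else
        Processor::disable_interrupts();
}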
Diffstat (limited to 'Kernel/Arch/x86')
-rw-r--r--  Kernel/Arch/x86/common/Interrupts.cpp   3
-rw-r--r--  Kernel/Arch/x86/common/Processor.cpp    3
-rw-r--r--  Kernel/Arch/x86/common/Spinlock.cpp    32
3 files changed, 14 insertions(+), 24 deletions(-)
diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index 604a74da33..d9fb429941 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -467,8 +467,7 @@ extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
// to this point
// The target flags will get restored upon leaving the trap
- u32 prev_flags = cpu_flags();
- Scheduler::leave_on_first_switch(prev_flags);
+ Scheduler::leave_on_first_switch(processor_interrupts_state());
}
extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 46902160f3..21158c54ed 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -1561,8 +1561,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
// the scheduler lock. We don't want to enable interrupts at this point
// as we're still in the middle of a context switch. Doing so could
// trigger a context switch within a context switch, leading to a crash.
- FlatPtr flags = trap->regs->flags();
- Scheduler::leave_on_first_switch(flags & ~0x200);
+ Scheduler::leave_on_first_switch(InterruptsState::Disabled);
}
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
diff --git a/Kernel/Arch/x86/common/Spinlock.cpp b/Kernel/Arch/x86/common/Spinlock.cpp
index 5af0b61495..6cf3cfbae5 100644
--- a/Kernel/Arch/x86/common/Spinlock.cpp
+++ b/Kernel/Arch/x86/common/Spinlock.cpp
@@ -8,35 +8,31 @@
namespace Kernel {
-u32 Spinlock::lock()
+InterruptsState Spinlock::lock()
{
- u32 prev_flags = cpu_flags();
+ InterruptsState previous_interrupts_state = processor_interrupts_state();
Processor::enter_critical();
- cli();
+ Processor::disable_interrupts();
while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
Processor::wait_check();
track_lock_acquire(m_rank);
- return prev_flags;
+ return previous_interrupts_state;
}
-void Spinlock::unlock(u32 prev_flags)
+void Spinlock::unlock(InterruptsState previous_interrupts_state)
{
VERIFY(is_locked());
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
Processor::leave_critical();
-
- if ((prev_flags & 0x200) != 0)
- sti();
- else
- cli();
+ restore_processor_interrupts_state(previous_interrupts_state);
}
-u32 RecursiveSpinlock::lock()
+InterruptsState RecursiveSpinlock::lock()
{
- u32 prev_flags = cpu_flags();
- cli();
+ InterruptsState previous_interrupts_state = processor_interrupts_state();
+ Processor::disable_interrupts();
Processor::enter_critical();
auto& proc = Processor::current();
FlatPtr cpu = FlatPtr(&proc);
@@ -50,10 +46,10 @@ u32 RecursiveSpinlock::lock()
if (m_recursions == 0)
track_lock_acquire(m_rank);
m_recursions++;
- return prev_flags;
+ return previous_interrupts_state;
}
-void RecursiveSpinlock::unlock(u32 prev_flags)
+void RecursiveSpinlock::unlock(InterruptsState previous_interrupts_state)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(m_recursions > 0);
@@ -64,11 +60,7 @@ void RecursiveSpinlock::unlock(u32 prev_flags)
}
Processor::leave_critical();
-
- if ((prev_flags & 0x200) != 0)
- sti();
- else
- cli();
+ restore_processor_interrupts_state(previous_interrupts_state);
}
}
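
To illustrate the updated call-site pattern, a rough sketch; do_work is a placeholder, and real SerenityOS call sites usually go through a SpinlockLocker-style RAII guard rather than calling lock()/unlock() by hand:

void do_work(Spinlock& lock)
{
    // lock() disables interrupts, enters a critical section, and reports
    // whether interrupts were enabled beforehand.
    InterruptsState previous_interrupts_state = lock.lock();

    // ... critical section ...

    // unlock() releases the lock and restores the previous interrupts state.
    lock.unlock(previous_interrupts_state);
}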