author    Tom <tomut@yahoo.com>                   2021-01-23 22:24:10 -0700
committer Andreas Kling <kling@serenityos.org>   2021-01-27 21:12:24 +0100
commit    f88a8b16d7391ccdb828f659e40b892fe8fdbc83 (patch)
tree      891f1daa4a1a7275d5ddff99cdcd9c3c59dc9c36 /Kernel
parent    33cdc1d2f141cf216501465a80c8c3198d005f6a (diff)
Kernel: Make entering and leaving critical sections atomic
We also need to store m_in_critical in the Thread when switching out, and restore it when switching back in. This solves a problem where threads moving between different processors could end up with an unexpected value.
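
To see why this matters, here is a rough standalone sketch of the idea, using simplified stand-in types rather than the actual kernel classes (Thread, Processor and switch_to() below are illustrative only): the per-processor critical-section depth is stashed into the outgoing thread and adopted from the incoming one, so a thread that migrates to another processor carries its expected depth with it.

    // Minimal sketch, not SerenityOS code: stand-in Thread/Processor types.
    #include <atomic>
    #include <cassert>
    #include <cstdint>

    struct Thread {
        uint32_t saved_critical { 1 }; // mirrors the m_saved_critical default added in this commit
    };

    struct Processor {
        std::atomic<uint32_t> in_critical { 0 };

        void switch_to(Thread& from, Thread& to)
        {
            // Stash the depth the outgoing thread was running at...
            from.saved_critical = in_critical.load(std::memory_order_relaxed);
            // ...and adopt whatever depth the incoming thread expects,
            // even if it last ran on a different processor.
            in_critical.store(to.saved_critical, std::memory_order_relaxed);
        }
    };

    int main()
    {
        Processor cpu0, cpu1;
        Thread a, b;
        cpu0.in_critical = 2;     // thread `a` is two levels deep on cpu0
        cpu0.switch_to(a, b);     // `a` is switched out; its depth is saved
        cpu1.switch_to(b, a);     // `a` resumes on cpu1 with the same depth
        assert(cpu1.in_critical.load() == 2);
    }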
Diffstat (limited to 'Kernel')
-rw-r--r--   Kernel/Arch/i386/CPU.cpp    3
-rw-r--r--   Kernel/Arch/i386/CPU.h     31
-rw-r--r--   Kernel/Thread.h             4
3 files changed, 25 insertions(+), 13 deletions(-)
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 67848f1422..0105b5e639 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -1291,6 +1291,7 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
write_cr3(to_tss.cr3);
to_thread->set_cpu(processor.id());
+ processor.restore_in_critical(to_thread->saved_critical());
asm volatile("fxrstor %0"
::"m"(to_thread->fpu_state()));
@@ -1308,6 +1309,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
ASSERT(is_kernel_mode());
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
+ from_thread->save_critical(m_in_critical);
// Switch to new thread context, passing from_thread and to_thread
// through to the new context using registers edx and eax
@@ -1347,6 +1349,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
[to_eip] "c" (to_thread->tss().eip),
[from_thread] "d" (from_thread),
[to_thread] "a" (to_thread)
+ : "memory"
);
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
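
One detail in the hunk above deserves a note: the new "memory" clobber makes the context-switch asm statement act as a compiler-level barrier, so the compiler cannot keep values such as m_in_critical cached in registers across the switch. A generic illustration of the pattern (plain GCC/Clang inline asm, not the kernel's switch code):

    // An asm statement with a "memory" clobber tells the compiler that memory may
    // have changed: pending stores are completed before the statement and cached
    // values are not reused after it. (Ordering at the CPU level is a separate
    // concern; this only constrains the compiler.)
    static inline void compiler_barrier()
    {
        asm volatile("" ::: "memory");
    }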
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 0595912189..bdc75034dd 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -708,7 +708,7 @@ class Processor {
u32 m_cpu;
u32 m_in_irq;
- u32 m_in_critical;
+ Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_in_critical;
TSS32 m_tss;
static FPUState s_clean_fpu_state;
@@ -876,15 +876,16 @@ public:
{
ASSERT(prev_irq <= m_in_irq);
if (!prev_irq) {
- if (m_in_critical == 0) {
- auto prev_critical = m_in_critical++;
+ u32 prev_critical = 0;
+ if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
m_in_irq = prev_irq;
deferred_call_execute_pending();
- ASSERT(m_in_critical == prev_critical + 1);
- m_in_critical = prev_critical;
- }
- if (!m_in_critical)
+ auto prev_raised = m_in_critical.exchange(prev_critical);
+ ASSERT(prev_raised == prev_critical + 1);
+ check_invoke_scheduler();
+ } else if (prev_critical == 0) {
check_invoke_scheduler();
+ }
} else {
m_in_irq = prev_irq;
}
@@ -895,11 +896,16 @@ public:
return m_in_irq;
}
+ ALWAYS_INLINE void restore_in_critical(u32 critical)
+ {
+ m_in_critical = critical;
+ }
+
ALWAYS_INLINE void enter_critical(u32& prev_flags)
{
- m_in_critical++;
prev_flags = cpu_flags();
cli();
+ m_in_critical++;
}
ALWAYS_INLINE void leave_critical(u32 prev_flags)
@@ -925,9 +931,8 @@ public:
ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
{
- u32 prev_crit = m_in_critical;
- m_in_critical = 0;
prev_flags = cpu_flags();
+ u32 prev_crit = m_in_critical.exchange(0, AK::MemoryOrder::memory_order_acquire);
if (!m_in_irq)
check_invoke_scheduler();
if (enable_interrupts)
@@ -937,15 +942,15 @@ public:
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
{
- ASSERT(m_in_critical == 0);
- m_in_critical = prev_crit;
+ m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
+ ASSERT(!prev_crit || !(prev_flags & 0x200));
if (prev_flags & 0x200)
sti();
else
cli();
}
- ALWAYS_INLINE u32& in_critical() { return m_in_critical; }
+ ALWAYS_INLINE u32 in_critical() { return m_in_critical.load(); }
ALWAYS_INLINE const FPUState& clean_fpu_state() const
{
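
The reworked leave_irq above leans on compare_exchange_strong to claim the critical section exactly once before running deferred calls. Here is a standalone sketch of that claim/run/release pattern, written against std::atomic rather than AK::Atomic (the helper names are placeholders, not kernel API):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    static std::atomic<uint32_t> in_critical { 0 };

    static void run_deferred_calls() { /* stand-in for deferred_call_execute_pending() */ }
    static void maybe_invoke_scheduler() { /* stand-in for check_invoke_scheduler() */ }

    static void leave_last_irq()
    {
        uint32_t expected = 0;
        // Claim the critical section only if nothing else holds it; on failure,
        // `expected` is overwritten with the depth that was observed instead.
        if (in_critical.compare_exchange_strong(expected, 1)) {
            run_deferred_calls();
            // Drop back to the previous depth and verify the counter is still
            // exactly one above it, i.e. nobody clobbered our claim.
            auto raised = in_critical.exchange(expected);
            assert(raised == expected + 1);
            maybe_invoke_scheduler();
        }
    }

    int main() { leave_last_irq(); }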
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 52fa204e6d..4208f245fd 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1065,6 +1065,9 @@ public:
m_is_active.store(active, AK::memory_order_release);
}
+ u32 saved_critical() const { return m_saved_critical; }
+ void save_critical(u32 critical) { m_saved_critical = critical; }
+
[[nodiscard]] bool is_active() const
{
return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
@@ -1239,6 +1242,7 @@ private:
ThreadID m_tid { -1 };
TSS32 m_tss;
TrapFrame* m_current_trap { nullptr };
+ u32 m_saved_critical { 1 };
Atomic<u32> m_cpu { 0 };
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
u32 m_ticks_left { 0 };
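
A note on the { 1 } default for m_saved_critical: the first time a freshly created thread is switched into, enter_thread_context() calls restore_in_critical(to_thread->saved_critical()) before the thread has ever had save_critical() called on it, and that first switch happens from scheduler code that is already inside a critical section, so a default depth of 1 keeps the count balanced. (That reading is inferred from the surrounding code; the commit message does not spell it out.)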