author | Andreas Kling <kling@serenityos.org> | 2021-08-22 12:21:31 +0200
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2021-08-23 00:02:09 +0200
commit | d60635cb9df9b4e72702db02b88b26ab9123c2a8 (patch) |
tree | 162105dee63773b68678c54d5a7f4016f7574c8f |
parent | 3e3f760808e614ca70d97294013cb58f2a0deaf5 (diff) |
download | serenity-d60635cb9df9b4e72702db02b88b26ab9123c2a8.zip |
Kernel: Convert Processor::in_irq() to static current_in_irq()
This closes the race window between Processor::current() and a context
switch happening before in_irq().
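To make the race concrete: `Processor::current()` reads the per-CPU `Processor` pointer, and `in_irq()` then dereferences it as a second step; a context switch between those two steps can migrate the thread to another CPU, so the old accessor could report a different processor's IRQ state. Below is a minimal standalone sketch (a userspace mock with stand-in definitions, not kernel code) contrasting the two access patterns:

```cpp
#include <cstdint>

// Mock of the kernel's per-CPU Processor struct, for illustration only.
struct Processor {
    uint32_t m_in_irq { 0 };

    // Old, racy shape: step 1 fetches the per-CPU Processor& (in the
    // kernel, a gs-relative pointer read); step 2, in_irq(), dereferences
    // it back in the caller. If the scheduler migrates the thread between
    // the steps, the reference names a CPU the thread is no longer on.
    static Processor& current();
    uint32_t in_irq() const { return m_in_irq; }

    // New shape: one self-contained read. In the kernel this is a single
    // gs-relative load of m_in_irq, so "which CPU am I on?" and "read its
    // m_in_irq" are the same memory access; no window is left for a
    // context switch to invalidate the answer.
    static uint32_t current_in_irq();
};

// Stand-in definitions so the sketch compiles; the real accessors read
// through the gs segment base that each CPU points at its own Processor.
static Processor g_mock_cpu;
Processor& Processor::current() { return g_mock_cpu; }
uint32_t Processor::current_in_irq() { return g_mock_cpu.m_in_irq; }
```

The diff applies the conversion mechanically at each call site; the one exception is `switch_context()` in both Processor.cpp files, which now asserts `!m_in_irq` directly, since it already has a stable `this` for the current processor.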
-rw-r--r-- | Kernel/Arch/x86/Processor.h | 6
-rw-r--r-- | Kernel/Arch/x86/common/Interrupts.cpp | 2
-rw-r--r-- | Kernel/Arch/x86/i386/Processor.cpp | 2
-rw-r--r-- | Kernel/Arch/x86/x86_64/Processor.cpp | 2
-rw-r--r-- | Kernel/Devices/AsyncDeviceRequest.cpp | 2
-rw-r--r-- | Kernel/Devices/HID/I8042Controller.cpp | 10
-rw-r--r-- | Kernel/FileSystem/File.h | 4
-rw-r--r-- | Kernel/Locking/Mutex.cpp | 8
-rw-r--r-- | Kernel/Memory/MemoryManager.cpp | 4
-rw-r--r-- | Kernel/SanCov.cpp | 2
-rw-r--r-- | Kernel/Scheduler.cpp | 16
-rw-r--r-- | Kernel/Thread.cpp | 12
-rw-r--r-- | Kernel/Thread.h | 4
-rw-r--r-- | Kernel/Time/TimeManagement.cpp | 2
14 files changed, 37 insertions, 39 deletions
```diff
diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index 42ebcc82b8..6377f0ddaf 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -120,7 +120,7 @@ class Processor {
     u32 m_gdt_length;
 
     u32 m_cpu;
-    u32 m_in_irq;
+    FlatPtr m_in_irq {};
     volatile u32 m_in_critical {};
     static Atomic<u32> s_idle_cpu_mask;
@@ -329,9 +329,9 @@ public:
         return Processor::id() == 0;
     }
 
-    ALWAYS_INLINE u32& in_irq()
+    ALWAYS_INLINE static FlatPtr current_in_irq()
     {
-        return m_in_irq;
+        return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
     }
 
     ALWAYS_INLINE static void restore_in_critical(u32 critical)
diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index 5f26402bfe..0177559b80 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -288,7 +288,7 @@ void page_fault_handler(TrapFrame* trap)
 
     bool faulted_in_kernel = !(regs.cs & 3);
 
-    if (faulted_in_kernel && Processor::current().in_irq()) {
+    if (faulted_in_kernel && Processor::current_in_irq()) {
         // If we're faulting in an IRQ handler, first check if we failed
         // due to safe_memcpy, safe_strnlen, or safe_memset. If we did,
         // gracefully continue immediately. Because we're in an IRQ handler
diff --git a/Kernel/Arch/x86/i386/Processor.cpp b/Kernel/Arch/x86/i386/Processor.cpp
index e57861b9f5..e29bec6159 100644
--- a/Kernel/Arch/x86/i386/Processor.cpp
+++ b/Kernel/Arch/x86/i386/Processor.cpp
@@ -180,7 +180,7 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
 
 void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
 {
-    VERIFY(!in_irq());
+    VERIFY(!m_in_irq);
     VERIFY(m_in_critical == 1);
     VERIFY(is_kernel_mode());
diff --git a/Kernel/Arch/x86/x86_64/Processor.cpp b/Kernel/Arch/x86/x86_64/Processor.cpp
index 2996957aa8..6ab4d907c0 100644
--- a/Kernel/Arch/x86/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86/x86_64/Processor.cpp
@@ -164,7 +164,7 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
 
 void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
 {
-    VERIFY(!in_irq());
+    VERIFY(!m_in_irq);
     VERIFY(m_in_critical == 1);
     VERIFY(is_kernel_mode());
diff --git a/Kernel/Devices/AsyncDeviceRequest.cpp b/Kernel/Devices/AsyncDeviceRequest.cpp
index 84524be30c..89f699d04d 100644
--- a/Kernel/Devices/AsyncDeviceRequest.cpp
+++ b/Kernel/Devices/AsyncDeviceRequest.cpp
@@ -135,7 +135,7 @@ void AsyncDeviceRequest::complete(RequestResult result)
         VERIFY(m_result == Started);
         m_result = result;
     }
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         ref(); // Make sure we don't get freed
         Processor::deferred_call_queue([this]() {
             request_finished();
diff --git a/Kernel/Devices/HID/I8042Controller.cpp b/Kernel/Devices/HID/I8042Controller.cpp
index 3f11b70560..a55326ef92 100644
--- a/Kernel/Devices/HID/I8042Controller.cpp
+++ b/Kernel/Devices/HID/I8042Controller.cpp
@@ -132,7 +132,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
 
 bool I8042Controller::irq_process_input_buffer(HIDDevice::Type)
 {
-    VERIFY(Processor::current().in_irq());
+    VERIFY(Processor::current_in_irq());
 
     u8 status = IO::in8(I8042_STATUS);
     if (!(status & I8042_BUFFER_FULL))
@@ -167,7 +167,7 @@ bool I8042Controller::do_reset_device(HIDDevice::Type device)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
 
     if (do_send_command(device, 0xff) != I8042_ACK)
         return false;
     // Wait until we get the self-test result
@@ -179,7 +179,7 @@ u8 I8042Controller::do_send_command(HIDDevice::Type device, u8 command)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
 
     return do_write_to_device(device, command);
 }
@@ -189,7 +189,7 @@ u8 I8042Controller::do_send_command(HIDDevice::Type device, u8 command, u8 data)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
 
     u8 response = do_write_to_device(device, command);
     if (response == I8042_ACK)
@@ -202,7 +202,7 @@ u8 I8042Controller::do_write_to_device(HIDDevice::Type device, u8 data)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
 
     int attempts = 0;
     u8 response;
diff --git a/Kernel/FileSystem/File.h b/Kernel/FileSystem/File.h
index cfc9bd3134..9957445029 100644
--- a/Kernel/FileSystem/File.h
+++ b/Kernel/FileSystem/File.h
@@ -121,7 +121,7 @@ protected:
     void evaluate_block_conditions()
     {
-        if (Processor::current().in_irq()) {
+        if (Processor::current_in_irq()) {
            // If called from an IRQ handler we need to delay evaluation
            // and unblocking of waiting threads. Note that this File
            // instance may be deleted until the deferred call is executed!
@@ -137,7 +137,7 @@ private:
    ALWAYS_INLINE void do_evaluate_block_conditions()
    {
-        VERIFY(!Processor::current().in_irq());
+        VERIFY(!Processor::current_in_irq());
        block_condition().unblock();
    }
diff --git a/Kernel/Locking/Mutex.cpp b/Kernel/Locking/Mutex.cpp
index b7271fbf07..829001958b 100644
--- a/Kernel/Locking/Mutex.cpp
+++ b/Kernel/Locking/Mutex.cpp
@@ -17,7 +17,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(mode != Mode::Unlocked);
     auto current_thread = Thread::current();
@@ -143,7 +143,7 @@ void Mutex::unlock()
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     auto current_thread = Thread::current();
     SpinlockLocker lock(m_lock);
     Mode current_mode = m_mode;
@@ -253,7 +253,7 @@ auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     auto current_thread = Thread::current();
     SpinlockLocker lock(m_lock);
     auto current_mode = m_mode;
@@ -316,7 +316,7 @@ void Mutex::restore_lock(Mode mode, u32 lock_count, [[maybe_unused]] LockLocatio
 {
     VERIFY(mode != Mode::Unlocked);
     VERIFY(lock_count > 0);
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     auto current_thread = Thread::current();
     bool did_block = false;
     SpinlockLocker lock(m_lock);
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 469ab4d01f..fd83dc49d3 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -685,9 +685,9 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
 PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
-            Processor::id(), fault.code(), fault.vaddr(), Processor::current().in_irq());
+            Processor::id(), fault.code(), fault.vaddr(), Processor::current_in_irq());
         dump_kernel_regions();
         return PageFaultResponse::ShouldCrash;
     }
diff --git a/Kernel/SanCov.cpp b/Kernel/SanCov.cpp
index 90d2fb0262..8e8040a706 100644
--- a/Kernel/SanCov.cpp
+++ b/Kernel/SanCov.cpp
@@ -17,7 +17,7 @@ void __sanitizer_cov_trace_pc(void)
     if (g_in_early_boot) [[unlikely]]
         return;
 
-    if (Processor::current().in_irq()) [[unlikely]] {
+    if (Processor::current_in_irq()) [[unlikely]] {
         // Do not trace in interrupts.
         return;
     }
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 8f19be9dad..528eac4a23 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -252,16 +252,15 @@ bool Scheduler::pick_next()
 
 bool Scheduler::yield()
 {
     InterruptDisabler disabler;
-    auto& proc = Processor::current();
     auto current_thread = Thread::current();
-    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
+    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::id(), *current_thread, Processor::current_in_irq());
     VERIFY(current_thread != nullptr);
 
-    if (proc.in_irq() || Processor::in_critical()) {
+    if (Processor::current_in_irq() || Processor::in_critical()) {
         // If we're handling an IRQ we can't switch context, or we're in
         // a critical section where we don't want to switch contexts, then
         // delay until exiting the trap or critical section
-        proc.invoke_scheduler_async();
+        Processor::current().invoke_scheduler_async();
         return false;
     }
@@ -269,7 +268,7 @@ bool Scheduler::yield()
         return false;
 
     if constexpr (SCHEDULER_DEBUG)
-        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current().in_irq());
+        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current_in_irq());
 
     return true;
 }
@@ -462,7 +461,7 @@ void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
 
 void Scheduler::timer_tick(const RegisterState& regs)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_irq());
+    VERIFY(Processor::current_in_irq());
 
     auto current_thread = Processor::current_thread();
     if (!current_thread)
@@ -506,15 +505,14 @@
     }
 
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_irq());
+    VERIFY(Processor::current_in_irq());
     Processor::current().invoke_scheduler_async();
 }
 
 void Scheduler::invoke_async()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    auto& processor = Processor::current();
-    VERIFY(!processor.in_irq());
+    VERIFY(!Processor::current_in_irq());
 
     // Since this function is called when leaving critical sections (such
     // as a Spinlock), we need to check if we're not already doing this
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 3c5d4ec18a..9b7a00d8ec 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -157,7 +157,7 @@ Thread::~Thread()
 
 void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock, u32 lock_count)
 {
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
     VERIFY(!Memory::s_mm_lock.own_lock());
@@ -238,7 +238,7 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
     SpinlockLocker scheduler_lock(g_scheduler_lock);
     SpinlockLocker block_lock(m_block_lock);
     VERIFY(m_blocking_lock == &lock);
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(g_scheduler_lock.own_lock());
     VERIFY(m_block_lock.own_lock());
     VERIFY(m_blocking_lock == &lock);
@@ -251,7 +251,7 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
         VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
         set_state(Thread::Runnable);
     };
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
             if (auto this_thread = self.strong_ref())
                 do_unblock();
@@ -272,7 +272,7 @@ void Thread::unblock_from_blocker(Blocker& blocker)
         if (!should_be_stopped() && !is_stopped())
             unblock();
     };
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
             if (auto this_thread = self.strong_ref())
                 do_unblock();
@@ -284,7 +284,7 @@ void Thread::unblock_from_blocker(Blocker& blocker)
 
 void Thread::unblock(u8 signal)
 {
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(g_scheduler_lock.own_lock());
     VERIFY(m_block_lock.own_lock());
     if (m_state != Thread::Blocked)
@@ -377,7 +377,7 @@ void Thread::die_if_needed()
     // Now leave the critical section so that we can also trigger the
     // actual context switch
     Processor::clear_critical();
-    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
+    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current_in_irq());
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
     VERIFY_NOT_REACHED();
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index c4ee8a04ed..608483ae1d 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -847,7 +847,7 @@ public:
    template<typename BlockerType, class... Args>
    [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
    {
-        VERIFY(!Processor::current().in_irq());
+        VERIFY(!Processor::current_in_irq());
        VERIFY(this == Thread::current());
        ScopedCritical critical;
        VERIFY(!Memory::s_mm_lock.own_lock());
@@ -889,7 +889,7 @@ public:
            // Process::kill_all_threads may be called at any time, which will mark all
            // threads to die. In that case
            timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
-                VERIFY(!Processor::current().in_irq());
+                VERIFY(!Processor::current_in_irq());
                VERIFY(!g_scheduler_lock.own_lock());
                VERIFY(!m_block_lock.own_lock());
                // NOTE: this may execute on the same or any other processor!
diff --git a/Kernel/Time/TimeManagement.cpp b/Kernel/Time/TimeManagement.cpp
index 6d9f6c6bf1..5cc391a413 100644
--- a/Kernel/Time/TimeManagement.cpp
+++ b/Kernel/Time/TimeManagement.cpp
@@ -403,7 +403,7 @@ void TimeManagement::increment_time_since_boot()
 
 void TimeManagement::system_timer_tick(const RegisterState& regs)
 {
-    if (Processor::current().in_irq() <= 1) {
+    if (Processor::current_in_irq() <= 1) {
         // Don't expire timers while handling IRQs
         TimerQueue::the().fire();
     }
```
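`current_in_irq()` bottoms out in `read_gs_ptr(__builtin_offsetof(Processor, m_in_irq))`, whose definition is not part of this diff. A plausible x86_64 shape for such a helper, assuming GCC/Clang inline assembly (a sketch, not necessarily the kernel's exact code):

```cpp
#include <cstdint>

using FlatPtr = uintptr_t; // stand-in for the kernel's pointer-sized integer

// Assumed shape of a gs-relative load helper (x86_64 only; the i386 build
// would need a 32-bit load). Not taken from this diff.
inline FlatPtr read_gs_ptr(FlatPtr offset)
{
    FlatPtr value;
    // A single instruction: load pointer-sized data from gs:offset. The gs
    // base points at the executing CPU's Processor struct, so whichever CPU
    // runs this reads its own field, and preemption cannot split the read.
    asm volatile("movq %%gs:%a[off], %[val]"
                 : [val] "=r"(value)
                 : [off] "ir"(offset));
    return value;
}
```

Note also that `m_in_irq` is a nesting depth, not a flag: `system_timer_tick()` only fires timers when `current_in_irq() <= 1`, i.e. when not inside a nested IRQ. That is presumably why the field stays an integer and merely widens to `FlatPtr`, matching what the pointer-sized gs read returns.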