author    | Tom <tomut@yahoo.com>                | 2021-01-26 20:44:01 -0700
committer | Andreas Kling <kling@serenityos.org> | 2021-01-27 21:12:24 +0100
commit    | e2f9e557d343baa40eca9724c7e28630a7fcaa58 (patch)
tree      | ad3ff7e08836eddd4c5323d2fd786613f144fddb /Kernel
parent    | 21d288a10e5a1a44e8852b70dd3bdfb619483180 (diff)
Kernel: Make Processor::id a static function
This eliminates the window between calling Processor::current and
the member function, during which the thread could be moved to
another processor. This is generally not as big a concern as with
Processor::current_thread, but the static function is also slightly
more lightweight.
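
To illustrate the race this closes, here is a minimal user-space sketch. The Processor stand-in, the thread_local simulation, and main() are illustrative assumptions only; the real kernel reads m_cpu through the fs segment, not a thread_local:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for the kernel's Processor (Kernel/Arch/i386/CPU.h).
    struct Processor {
        uint32_t m_cpu { 0 };

        // In the kernel, fs points at the executing CPU's Processor; a
        // thread_local pointer plays that role in this sketch.
        static thread_local Processor* s_current;

        static Processor& current() { return *s_current; }

        // Old pattern: Processor::current().get_id() takes two steps. Between
        // current() returning and the caller reading m_cpu, the scheduler
        // could migrate the thread, so the reference may name a CPU the
        // thread is no longer running on.
        uint32_t get_id() const { return m_cpu; }

        // New pattern: one self-contained read of the executing CPU's m_cpu;
        // in the kernel this is a single fs-relative load,
        // read_fs_u32(__builtin_offsetof(Processor, m_cpu)).
        static uint32_t id() { return s_current->m_cpu; }
    };

    thread_local Processor* Processor::s_current = nullptr;

    int main()
    {
        Processor cpu0;
        Processor::s_current = &cpu0;
        std::printf("running on cpu #%u\n", (unsigned)Processor::id());
        return 0;
    }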
Diffstat (limited to 'Kernel')
-rw-r--r-- | Kernel/Arch/i386/CPU.cpp       | 16
-rw-r--r-- | Kernel/Arch/i386/CPU.h         | 13
-rw-r--r-- | Kernel/FileSystem/ProcFS.cpp   |  2
-rw-r--r-- | Kernel/Interrupts/APIC.cpp     | 13
-rw-r--r-- | Kernel/Scheduler.cpp           | 28
-rw-r--r-- | Kernel/Time/TimeManagement.cpp |  4
-rw-r--r-- | Kernel/VM/MemoryManager.cpp    | 10
7 files changed, 48 insertions, 38 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 1cbfc1a291..89f2c48a26 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -174,7 +174,7 @@ void handle_crash(RegisterState& regs, const char* description, int signal, bool
     // make sure we switch back to the right page tables.
     MM.enter_process_paging_scope(*process);

-    klog() << "CRASH: CPU #" << Processor::current().id() << " " << description << ". Ring " << (regs.cs & 3) << ".";
+    klog() << "CRASH: CPU #" << Processor::id() << " " << description << ". Ring " << (regs.cs & 3) << ".";
     dump(regs);

     if (!(regs.cs & 3)) {
@@ -232,7 +232,7 @@ void page_fault_handler(TrapFrame* trap)
     if constexpr (PAGE_FAULT_DEBUG) {
         u32 fault_page_directory = read_cr3();
         dbgln("CPU #{} ring {} {} page fault in PD={:#x}, {}{} {}",
-            Processor::is_initialized() ? Processor::current().id() : 0,
+            Processor::is_initialized() ? Processor::id() : 0,
             regs.cs & 3,
             regs.exception_code & 1 ? "PV" : "NP",
             fault_page_directory,
@@ -1207,7 +1207,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         lock.unlock();
         capture_current_thread();
     } else if (thread.is_active()) {
-        ASSERT(thread.cpu() != Processor::current().id());
+        ASSERT(thread.cpu() != Processor::id());
         // If this is the case, the thread is currently running
         // on another processor. We can't trust the kernel stack as
         // it may be changing at any time. We need to probably send
@@ -1216,7 +1216,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         auto& proc = Processor::current();
         smp_unicast(thread.cpu(), [&]() {
-            dbgln("CPU[{}] getting stack for cpu #{}", Processor::current().id(), proc.id());
+            dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
             ProcessPagingScope paging_scope(thread.process());
             ASSERT(&Processor::current() != &proc);
             ASSERT(&thread == Processor::current_thread());
@@ -1294,7 +1294,7 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     if (from_tss.cr3 != to_tss.cr3)
         write_cr3(to_tss.cr3);

-    to_thread->set_cpu(processor.id());
+    to_thread->set_cpu(processor.get_id());
     processor.restore_in_critical(to_thread->saved_critical());

     asm volatile("fxrstor %0"
@@ -1862,7 +1862,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
 {
     auto& cur_proc = Processor::current();

-    dbgln<SMP_DEBUG>("SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));
+    dbgln<SMP_DEBUG>("SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));

     atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
     ASSERT(msg.refs > 0);
@@ -1927,11 +1927,11 @@ void Processor::smp_broadcast(void (*callback)(), bool async)
 void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
 {
     auto& cur_proc = Processor::current();
-    ASSERT(cpu != cur_proc.id());
+    ASSERT(cpu != cur_proc.get_id());
     auto& target_proc = processors()[cpu];
     msg.async = async;

-    dbgln<SMP_DEBUG>("SMP[{}]: Send message {} to cpu #{} proc: {}", cur_proc.id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_proc));
+    dbgln<SMP_DEBUG>("SMP[{}]: Send message {} to cpu #{} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_proc));

     atomic_store(&msg.refs, 1u, AK::MemoryOrder::memory_order_release);
     if (target_proc->smp_queue_message(msg)) {
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 3c0e1ab5db..77ed93552b 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -867,11 +867,22 @@ public:
         write_fs_u32(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
     }

-    ALWAYS_INLINE u32 id()
+    ALWAYS_INLINE u32 get_id() const
     {
+        // NOTE: This variant should only be used when iterating over all
+        // Processor instances, or when it's guaranteed that the thread
+        // cannot move to another processor in between calling Processor::current
+        // and Processor::get_id, or if this fact is not important.
+        // All other cases should use Processor::id instead!
         return m_cpu;
     }

+    ALWAYS_INLINE static u32 id()
+    {
+        // See comment in Processor::current_thread
+        return read_fs_u32(__builtin_offsetof(Processor, m_cpu));
+    }
+
     ALWAYS_INLINE u32 raise_irq()
     {
         return m_in_irq++;
diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index ff8a92593b..9e5fe17c37 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -677,7 +677,7 @@ static bool procfs$cpuinfo(InodeIdentifier, KBufferBuilder& builder)
         JsonArray features;
         for (auto& feature : info.features().split(' '))
             features.append(feature);
-        obj.add("processor", proc.id());
+        obj.add("processor", proc.get_id());
         obj.add("cpuid", info.cpuid());
         obj.add("family", info.display_family());
         obj.add("features", features);
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index e6431f04c3..a48fb6613e 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -508,7 +508,7 @@ void APIC::init_finished(u32 cpu)
 void APIC::broadcast_ipi()
 {
 #if APIC_SMP_DEBUG
-    klog() << "SMP: Broadcast IPI from cpu #" << Processor::current().id();
+    klog() << "SMP: Broadcast IPI from cpu #" << Processor::id();
 #endif
     wait_for_pending_icr();
     write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
@@ -516,11 +516,10 @@ void APIC::broadcast_ipi()
 void APIC::send_ipi(u32 cpu)
 {
-    auto& proc = Processor::current();
 #if APIC_SMP_DEBUG
-    klog() << "SMP: Send IPI from cpu #" << proc.id() << " to cpu #" << cpu;
+    klog() << "SMP: Send IPI from cpu #" << Processor::id() << " to cpu #" << cpu;
 #endif
-    ASSERT(cpu != proc.id());
+    ASSERT(cpu != Processor::id());
     ASSERT(cpu < 8);
     wait_for_pending_icr();
     write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand, 1u << cpu));
@@ -532,7 +531,7 @@ APICTimer* APIC::initialize_timers(HardwareTimerBase& calibration_timer)
         return nullptr;

     // We should only initialize and calibrate the APIC timer once on the BSP!
-    ASSERT(Processor::current().id() == 0);
+    ASSERT(Processor::id() == 0);
     ASSERT(!m_apic_timer);

     m_apic_timer = APICTimer::initialize(IRQ_APIC_TIMER, calibration_timer);
@@ -605,7 +604,7 @@ u32 APIC::get_timer_divisor()
 void APICIPIInterruptHandler::handle_interrupt(const RegisterState&)
 {
 #if APIC_SMP_DEBUG
-    klog() << "APIC IPI on cpu #" << Processor::current().id();
+    klog() << "APIC IPI on cpu #" << Processor::id();
 #endif
 }

@@ -620,7 +619,7 @@ bool APICIPIInterruptHandler::eoi()
 void APICErrInterruptHandler::handle_interrupt(const RegisterState&)
 {
-    klog() << "APIC: SMP error on cpu #" << Processor::current().id();
+    klog() << "APIC: SMP error on cpu #" << Processor::id();
 }

 bool APICErrInterruptHandler::eoi()
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 36b802ac62..1fd17e1ae7 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -91,7 +91,7 @@ void Scheduler::start()
     idle_thread.set_initialized(true);
     processor.init_context(idle_thread, false);
     idle_thread.set_state(Thread::Running);
-    ASSERT(idle_thread.affinity() == (1u << processor.id()));
+    ASSERT(idle_thread.affinity() == (1u << processor.get_id()));
     processor.initialize_context_switching(idle_thread);
     ASSERT_NOT_REACHED();
 }
@@ -130,13 +130,13 @@ bool Scheduler::pick_next()
         // transition back to user mode.

         if constexpr (SCHEDULER_DEBUG)
-            dbgln("Scheduler[{}]: Thread {} is dying", Processor::current().id(), *current_thread);
+            dbgln("Scheduler[{}]: Thread {} is dying", Processor::id(), *current_thread);
         current_thread->set_state(Thread::Dying);
     }

     if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
-        dbgln("Scheduler[{}j]: Non-runnables:", Processor::current().id());
+        dbgln("Scheduler[{}j]: Non-runnables:", Processor::id());
         Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
             if (thread.state() == Thread::Dying) {
                 dbgln("  {:12} {} @ {:04x}:{:08x} Finalizable: {}",
@@ -156,7 +156,7 @@ bool Scheduler::pick_next()
             return IterationDecision::Continue;
         });

-        dbgln("Scheduler[{}j]: Runnables:", Processor::current().id());
+        dbgln("Scheduler[{}j]: Runnables:", Processor::id());
         Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
             dbgln("  {:3}/{:2} {:12} @ {:04x}:{:08x}",
                 thread.effective_priority(),
@@ -174,7 +174,7 @@ bool Scheduler::pick_next()
     auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
     Vector<Thread*, 128> sorted_runnables;
     for_each_runnable([&](auto& thread) {
-        if ((thread.affinity() & (1u << Processor::current().id())) == 0)
+        if ((thread.affinity() & (1u << Processor::id())) == 0)
             return IterationDecision::Continue;
         if (thread.state() == Thread::Running && &thread != current_thread)
             return IterationDecision::Continue;
@@ -226,7 +226,7 @@ bool Scheduler::pick_next()
     if constexpr (SCHEDULER_DEBUG) {
         dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
-            Processor::current().id(),
+            Processor::id(),
             *thread_to_schedule,
             thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
     }
@@ -250,7 +250,7 @@ bool Scheduler::yield()
     scheduler_data.m_pending_donate_reason = nullptr;

     auto current_thread = Thread::current();
-    dbgln<SCHEDULER_DEBUG>("Scheduler[{}]: yielding thread {} in_irq={}", proc.id(), *current_thread, proc.in_irq());
+    dbgln<SCHEDULER_DEBUG>("Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
     ASSERT(current_thread != nullptr);
     if (proc.in_irq() || proc.in_critical()) {
         // If we're handling an IRQ we can't switch context, or we're in
@@ -264,7 +264,7 @@ bool Scheduler::yield()
         return false;

     if constexpr (SCHEDULER_DEBUG)
-        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::current().id(), *current_thread, Processor::current().in_irq());
+        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current().in_irq());
     return true;
 }

@@ -280,7 +280,7 @@ bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const
         return Scheduler::yield();

     unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
-    dbgln<SCHEDULER_DEBUG>("Scheduler[{}]: Donating {} ticks to {}, reason={}", proc.id(), ticks_to_donate, *beneficiary, reason);
+    dbgln<SCHEDULER_DEBUG>("Scheduler[{}]: Donating {} ticks to {}, reason={}", proc.get_id(), ticks_to_donate, *beneficiary, reason);
     beneficiary->set_ticks_left(ticks_to_donate);

     return Scheduler::context_switch(beneficiary);
@@ -343,7 +343,7 @@ bool Scheduler::context_switch(Thread* thread)
         from_thread->set_state(Thread::Runnable);

 #ifdef LOG_EVERY_CONTEXT_SWITCH
-        dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}", Processor::current().id(), from_thread->tid().value(), thread->tid().value(), thread->priority(), thread->tss().cs, thread->tss().eip);
+        dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}", Processor::id(), from_thread->tid().value(), thread->tid().value(), thread->priority(), thread->tss().cs, thread->tss().eip);
 #endif
     }

@@ -470,7 +470,7 @@ Thread* Scheduler::create_ap_idle_thread(u32 cpu)
 {
     ASSERT(cpu != 0);
     // This function is called on the bsp, but creates an idle thread for another AP
-    ASSERT(Processor::current().id() == 0);
+    ASSERT(Processor::id() == 0);

     ASSERT(s_colonel_process);
     Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
@@ -491,7 +491,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
     ASSERT(current_thread->current_trap());
     ASSERT(current_thread->current_trap()->regs == &regs);

-    bool is_bsp = Processor::current().id() == 0;
+    bool is_bsp = Processor::id() == 0;
     if (!is_bsp)
         return; // TODO: This prevents scheduling on other CPUs!
     if (current_thread->process().is_profiling()) {
@@ -544,13 +544,13 @@ void Scheduler::notify_finalizer()
 void Scheduler::idle_loop(void*)
 {
-    dbgln("Scheduler[{}]: idle loop running", Processor::current().id());
+    dbgln("Scheduler[{}]: idle loop running", Processor::id());
     ASSERT(are_interrupts_enabled());

     for (;;) {
         asm("hlt");
-        if (Processor::current().id() == 0)
+        if (Processor::id() == 0)
             yield();
     }
 }
diff --git a/Kernel/Time/TimeManagement.cpp b/Kernel/Time/TimeManagement.cpp
index d4e4992e67..1ac2a4b1cd 100644
--- a/Kernel/Time/TimeManagement.cpp
+++ b/Kernel/Time/TimeManagement.cpp
@@ -170,7 +170,7 @@ void TimeManagement::initialize(u32 cpu)
 void TimeManagement::set_system_timer(HardwareTimerBase& timer)
 {
-    ASSERT(Processor::current().id() == 0); // This should only be called on the BSP!
+    ASSERT(Processor::id() == 0); // This should only be called on the BSP!
     auto original_callback = m_system_timer->set_callback(nullptr);
     m_system_timer->disable();
     timer.set_callback(move(original_callback));
@@ -287,7 +287,7 @@ bool TimeManagement::probe_and_set_non_legacy_hardware_timers()
         // Update the time. We don't really care too much about the
         // frequency of the interrupt because we'll query the main
         // counter to get an accurate time.
-        if (Processor::current().id() == 0) {
+        if (Processor::id() == 0) {
             // TODO: Have the other CPUs call system_timer_tick directly
             increment_time_since_boot_hpet();
         }
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index c1710ff2f4..7397ccd085 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -378,16 +378,16 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     ScopedSpinLock lock(s_mm_lock);
     if (Processor::current().in_irq()) {
         dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
-            Processor::current().id(), fault.code(), fault.vaddr(), Processor::current().in_irq());
+            Processor::id(), fault.code(), fault.vaddr(), Processor::current().in_irq());
         dump_kernel_regions();
         return PageFaultResponse::ShouldCrash;
     }
 #if PAGE_FAULT_DEBUG
-    dbgln("MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::current().id(), fault.code(), fault.vaddr());
+    dbgln("MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::id(), fault.code(), fault.vaddr());
 #endif
     auto* region = find_region_from_vaddr(fault.vaddr());
     if (!region) {
-        klog() << "CPU[" << Processor::current().id() << "] NP(error) fault at invalid address " << fault.vaddr();
+        klog() << "CPU[" << Processor::id() << "] NP(error) fault at invalid address " << fault.vaddr();
         return PageFaultResponse::ShouldCrash;
     }
@@ -745,7 +745,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
     ScopedSpinLock lock(s_mm_lock);

-    u32 pte_idx = 8 + Processor::current().id();
+    u32 pte_idx = 8 + Processor::id();
     VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
     auto& pte = boot_pd3_pt1023[pte_idx];
@@ -765,7 +765,7 @@ void MemoryManager::unquickmap_page()
     ScopedSpinLock lock(s_mm_lock);
     auto& mm_data = get_data();
     ASSERT(mm_data.m_quickmap_in_use.is_locked());
-    u32 pte_idx = 8 + Processor::current().id();
+    u32 pte_idx = 8 + Processor::id();
     VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
     auto& pte = boot_pd3_pt1023[pte_idx];
     pte.clear();
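
The static Processor::id() added above bottoms out in read_fs_u32, a single segment-relative load. A rough sketch of that primitive, assuming i386 and GCC-style inline assembly (the exact helper lives in Kernel/Arch/i386/CPU.h):

    typedef unsigned int u32;

    // Each processor's GDT maps the fs segment at its own Processor struct,
    // so "%fs:offset" reads the executing CPU's field in one instruction.
    // There is no separate "fetch Processor*" step for the scheduler to
    // preempt in between.
    static inline u32 read_fs_u32(u32 offset)
    {
        u32 val;
        asm volatile("movl %%fs:%a[off], %[val]"
                     : [val] "=r"(val)
                     : [off] "ir"(offset));
        return val;
    }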