diff options
author | Tom <tomut@yahoo.com> | 2020-07-06 07:27:22 -0600 |
---|---|---|
committer | Andreas Kling <kling@serenityos.org> | 2020-07-06 17:07:44 +0200 |
commit | bc107d0b3311677de7bc084cbb75c21b166c8ad5 (patch) | |
tree | 586d1cfa189d72309f978122e15d112539feef61 /Kernel/Interrupts | |
parent | dec27e5e6fad25fedd9203087b8107242369dd2b (diff) | |
download | serenity-bc107d0b3311677de7bc084cbb75c21b166c8ad5.zip |
Kernel: Add SMP IPI support
We can now properly initialize all processors without
crashing, by sending SMP IPI messages to synchronize memory
between processors.
We now initialize the APs once we have the scheduler running.
This is so that we can process IPI messages from the other
cores.
Also rework interrupt handling a bit so that it's more of a
1:1 mapping. We need to allocate non-shareable interrupts for
IPIs.
This also fixes the occasional hang/crash because all
CPUs now synchronize memory with each other.
Diffstat (limited to 'Kernel/Interrupts')
-rw-r--r-- | Kernel/Interrupts/APIC.cpp | 300 | ||||
-rw-r--r-- | Kernel/Interrupts/APIC.h | 38 | ||||
-rw-r--r-- | Kernel/Interrupts/GenericInterruptHandler.cpp | 16 | ||||
-rw-r--r-- | Kernel/Interrupts/GenericInterruptHandler.h | 3 | ||||
-rw-r--r-- | Kernel/Interrupts/InterruptManagement.cpp | 3 | ||||
-rw-r--r-- | Kernel/Interrupts/UnhandledInterruptHandler.cpp | 4 |
6 files changed, 295 insertions, 69 deletions
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp index 71ae5a8bf6..495b903cf6 100644 --- a/Kernel/Interrupts/APIC.cpp +++ b/Kernel/Interrupts/APIC.cpp @@ -30,6 +30,7 @@ #include <AK/Types.h> #include <Kernel/ACPI/Parser.h> #include <Kernel/Arch/i386/CPU.h> +#include <Kernel/Arch/i386/ProcessorInfo.h> #include <Kernel/IO.h> #include <Kernel/Interrupts/APIC.h> #include <Kernel/Interrupts/SpuriousInterruptHandler.h> @@ -39,8 +40,15 @@ #include <Kernel/VM/TypedMapping.h> //#define APIC_DEBUG +//#define APIC_SMP_DEBUG -#define IRQ_APIC_SPURIOUS 0x7f +#define IRQ_APIC_IPI (0xfd - IRQ_VECTOR_BASE) +#define IRQ_APIC_ERR (0xfe - IRQ_VECTOR_BASE) +#define IRQ_APIC_SPURIOUS (0xff - IRQ_VECTOR_BASE) + +#define APIC_ICR_DELIVERY_PENDING (1 << 12) + +#define APIC_ENABLED (1 << 8) #define APIC_BASE_MSR 0x1b @@ -60,7 +68,67 @@ namespace Kernel { -static APIC *s_apic; +static APIC* s_apic; + +class APICIPIInterruptHandler final : public GenericInterruptHandler { +public: + explicit APICIPIInterruptHandler(u8 interrupt_vector) + : GenericInterruptHandler(interrupt_vector, true) + { + } + virtual ~APICIPIInterruptHandler() + { + } + + static void initialize(u8 interrupt_number) + { + new APICIPIInterruptHandler(interrupt_number); + } + + virtual void handle_interrupt(const RegisterState&) override; + + virtual bool eoi() override; + + virtual HandlerType type() const override { return HandlerType::IRQHandler; } + virtual const char* purpose() const override { return "IPI Handler"; } + virtual const char* controller() const override { ASSERT_NOT_REACHED(); } + + virtual size_t sharing_devices_count() const override { return 0; } + virtual bool is_shared_handler() const override { return false; } + virtual bool is_sharing_with_others() const override { return false; } + +private: +}; + +class APICErrInterruptHandler final : public GenericInterruptHandler { +public: + explicit APICErrInterruptHandler(u8 interrupt_vector) + : GenericInterruptHandler(interrupt_vector, 
true) + { + } + virtual ~APICErrInterruptHandler() + { + } + + static void initialize(u8 interrupt_number) + { + new APICErrInterruptHandler(interrupt_number); + } + + virtual void handle_interrupt(const RegisterState&) override; + + virtual bool eoi() override; + + virtual HandlerType type() const override { return HandlerType::IRQHandler; } + virtual const char* purpose() const override { return "SMP Error Handler"; } + virtual const char* controller() const override { ASSERT_NOT_REACHED(); } + + virtual size_t sharing_devices_count() const override { return 0; } + virtual bool is_shared_handler() const override { return false; } + virtual bool is_sharing_with_others() const override { return false; } + +private: +}; bool APIC::initialized() { @@ -79,8 +147,6 @@ void APIC::initialize() s_apic = new APIC(); } - - PhysicalAddress APIC::get_base() { u32 lo, hi; @@ -107,6 +173,23 @@ u32 APIC::read_register(u32 offset) return *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr()); } +void APIC::set_lvt(u32 offset, u8 interrupt) +{ + write_register(offset, (read_register(offset) & 0xffffffff) | interrupt); +} + +void APIC::set_siv(u32 offset, u8 interrupt) +{ + write_register(offset, (read_register(offset) & 0xffffffff) | interrupt | APIC_ENABLED); +} + +void APIC::wait_for_pending_icr() +{ + while ((read_register(APIC_REG_ICR_LOW) & APIC_ICR_DELIVERY_PENDING) != 0) { + IO::delay(200); + } +} + void APIC::write_icr(const ICRReg& icr) { write_register(APIC_REG_ICR_HIGH, icr.high()); @@ -137,9 +220,9 @@ u8 APIC::spurious_interrupt_vector() return IRQ_APIC_SPURIOUS; } -#define APIC_INIT_VAR_PTR(tpe,vaddr,varname) \ +#define APIC_INIT_VAR_PTR(tpe, vaddr, varname) \ reinterpret_cast<volatile tpe*>(reinterpret_cast<ptrdiff_t>(vaddr) \ - + reinterpret_cast<ptrdiff_t>(&varname) \ + + reinterpret_cast<ptrdiff_t>(&varname) \ - reinterpret_cast<ptrdiff_t>(&apic_ap_start)) bool APIC::init_bsp() @@ -172,8 +255,6 @@ bool APIC::init_bsp() return false; } - u32 
processor_cnt = 0; - u32 processor_enabled_cnt = 0; auto madt = map_typed<ACPI::Structures::MADT>(madt_address); size_t entry_index = 0; size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT); @@ -184,29 +265,33 @@ bool APIC::init_bsp() auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry; #ifdef APIC_DEBUG klog() << "APIC: AP found @ MADT entry " << entry_index << ", Processor Id: " << String::format("%02x", plapic_entry->acpi_processor_id) - << " APIC Id: " << String::format("%02x", plapic_entry->apic_id) << " Flags: " << String::format("%08x", plapic_entry->flags); + << " APIC Id: " << String::format("%02x", plapic_entry->apic_id) << " Flags: " << String::format("%08x", plapic_entry->flags); #endif - processor_cnt++; + m_processor_cnt++; if ((plapic_entry->flags & 0x1) != 0) - processor_enabled_cnt++; + m_processor_enabled_cnt++; } madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get()); entries_length -= entry_length; entry_index++; } - - if (processor_enabled_cnt < 1) - processor_enabled_cnt = 1; - if (processor_cnt < 1) - processor_cnt = 1; - klog() << "APIC Processors found: " << processor_cnt << ", enabled: " << processor_enabled_cnt; + if (m_processor_enabled_cnt < 1) + m_processor_enabled_cnt = 1; + if (m_processor_cnt < 1) + m_processor_cnt = 1; - enable_bsp(); + klog() << "APIC Processors found: " << m_processor_cnt << ", enabled: " << m_processor_enabled_cnt; + + enable(0); + return true; +} + +void APIC::do_boot_aps() +{ + if (m_processor_enabled_cnt > 1) { + u32 aps_to_enable = m_processor_enabled_cnt - 1; - if (processor_enabled_cnt > 1) { - u32 aps_to_enable = processor_enabled_cnt - 1; - // Copy the APIC startup code and variables to P0x00008000 // Also account for the data appended to: // * aps_to_enable u32 values for ap_cpu_init_stacks @@ -219,17 +304,17 @@ bool APIC::init_bsp() auto stack_region = 
MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, true, true); if (!stack_region) { klog() << "APIC: Failed to allocate stack for AP #" << i; - return false; + return; } stack_region->set_stack(true); - m_apic_ap_stacks.append(stack_region.release_nonnull()); + m_apic_ap_stacks.append(move(stack_region)); } // Store pointers to all stacks for the APs to use auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks); ASSERT(aps_to_enable == m_apic_ap_stacks.size()); for (size_t i = 0; i < aps_to_enable; i++) { - ap_stack_array[i] = m_apic_ap_stacks[i].vaddr().get() + Thread::default_kernel_stack_size; + ap_stack_array[i] = m_apic_ap_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size; #ifdef APIC_DEBUG klog() << "APIC: CPU[" << (i + 1) << "] stack at " << VirtualAddress(ap_stack_array[i]); #endif @@ -237,9 +322,11 @@ bool APIC::init_bsp() // Allocate Processor structures for all APs and store the pointer to the data m_ap_processor_info.resize(aps_to_enable); + for (size_t i = 0; i < aps_to_enable; i++) + m_ap_processor_info[i] = make<Processor>(); auto ap_processor_info_array = &ap_stack_array[aps_to_enable]; for (size_t i = 0; i < aps_to_enable; i++) { - ap_processor_info_array[i] = FlatPtr(&m_ap_processor_info.at(i)); + ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr()); #ifdef APIC_DEBUG klog() << "APIC: CPU[" << (i + 1) << "] Processor at " << VirtualAddress(ap_processor_info_array[i]); #endif @@ -248,17 +335,25 @@ bool APIC::init_bsp() // Store the BSP's CR3 value for the APs to use *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr3) = MM.kernel_page_directory().cr3(); - + // Store the BSP's GDT and IDT for the APs to use const auto& gdtr = Processor::current().get_gdtr(); *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_gdtr) = FlatPtr(&gdtr); const auto& idtr = get_idtr(); 
*APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_idtr) = FlatPtr(&idtr); - + // Store the BSP's CR0 and CR4 values for the APs to use *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr0) = read_cr0(); *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr4) = read_cr4(); + // Create an idle thread for each processor. We have to do this here + // because we won't be able to send FlushTLB messages, so we have to + // have all memory set up for the threads so that when the APs are + // starting up, they can access all the memory properly + m_ap_idle_threads.resize(aps_to_enable); + for (u32 i = 0; i < aps_to_enable; i++) + m_ap_idle_threads[i] = Scheduler::create_ap_idle_thread(i + 1); + #ifdef APIC_DEBUG klog() << "APIC: Starting " << aps_to_enable << " AP(s)"; #endif @@ -287,50 +382,153 @@ bool APIC::init_bsp() } #ifdef APIC_DEBUG - klog() << "APIC: " << processor_enabled_cnt << " processors are initialized and running"; + klog() << "APIC: " << m_processor_enabled_cnt << " processors are initialized and running"; #endif } - return true; } -void APIC::enable_bsp() +void APIC::boot_aps() { - // FIXME: Ensure this method can only be executed by the BSP. - enable(0); + // We split this into another call because do_boot_aps() will cause + // MM calls upon exit, and we don't want to call smp_enable before that + do_boot_aps(); + + // Enable SMP, which means IPIs may now be sent + Processor::smp_enable(); + + // Now trigger all APs to continue execution (need to do this after + // the regions have been freed so that we don't trigger IPIs + if (m_processor_enabled_cnt > 1) + m_apic_ap_continue.store(1, AK::MemoryOrder::memory_order_release); } void APIC::enable(u32 cpu) { + if (cpu >= 8) { + // TODO: x2apic support? 
+ klog() << "SMP support is currently limited to 8 CPUs!"; + Processor::halt(); + } + + u32 apic_id = (1u << cpu); + + write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (apic_id << 24)); // TODO: only if not in x2apic mode + + // read it back to make sure it's actually set + apic_id = read_register(APIC_REG_LD) >> 24; + Processor::current().info().set_apic_id(apic_id); + #ifdef APIC_DEBUG - klog() << "Enabling local APIC for cpu #" << cpu; + klog() << "Enabling local APIC for cpu #" << cpu << " apic id: " << apic_id; #endif if (cpu == 0) { - // dummy read, apparently to avoid a bug in old CPUs. - read_register(APIC_REG_SIV); - // set spurious interrupt vector - write_register(APIC_REG_SIV, (IRQ_APIC_SPURIOUS + IRQ_VECTOR_BASE) | 0x100); + SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS); - // local destination mode (flat mode) - write_register(APIC_REG_DF, 0xf0000000); + // set error interrupt vector + set_lvt(APIC_REG_LVT_ERR, IRQ_APIC_ERR); + APICErrInterruptHandler::initialize(IRQ_APIC_ERR); - // set destination id (note that this limits it to 8 cpus) - write_register(APIC_REG_LD, 0); + // register IPI interrupt vector + APICIPIInterruptHandler::initialize(IRQ_APIC_IPI); + } - SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS); + // set spurious interrupt vector + set_siv(APIC_REG_SIV, IRQ_APIC_SPURIOUS); + + // local destination mode (flat mode) + write_register(APIC_REG_DF, 0xf0000000); - write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED); - write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED); - write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED); - write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED); - write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL); - write_register(APIC_REG_LVT_ERR, APIC_LVT(0, 0) | APIC_LVT_MASKED); + write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED); + 
write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED); + write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED); + write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED); + write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL); - write_register(APIC_REG_TPR, 0); - } else { + write_register(APIC_REG_TPR, 0); + + if (cpu > 0) { // Notify the BSP that we are done initializing. It will unmap the startup data at P8000 - m_apic_ap_count++; + m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel); +#ifdef APIC_DEBUG + klog() << "APIC: cpu #" << cpu << " initialized, waiting for all others"; +#endif + // The reason we're making all APs wait until the BSP signals them is that + // we don't want APs to trigger IPIs (e.g. through MM) while the BSP + // is unable to process them + while (!m_apic_ap_continue.load(AK::MemoryOrder::memory_order_consume)) { + IO::delay(200); + } + + // boot_aps() freed memory, so we need to update our tlb + Processor::flush_entire_tlb_local(); } } +Thread* APIC::get_idle_thread(u32 cpu) const +{ + ASSERT(cpu > 0); + return m_ap_idle_threads[cpu - 1]; +} + +void APIC::init_finished(u32 cpu) +{ + // This method is called once the boot stack is no longer needed + ASSERT(cpu > 0); + ASSERT(cpu <= m_apic_ap_count); + ASSERT(m_apic_ap_stacks[cpu - 1]); +#ifdef APIC_DEBUG + klog() << "APIC: boot stack for for cpu #" << cpu << " no longer needed"; +#endif + m_apic_ap_stacks[cpu - 1].clear(); +} + +void APIC::broadcast_ipi() +{ +#ifdef APIC_SMP_DEBUG + klog() << "SMP: Broadcast IPI from cpu #" << Processor::current().id(); +#endif + wait_for_pending_icr(); + write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf)); +} + +void APIC::send_ipi(u32 cpu) +{ + auto& proc = Processor::current(); +#ifdef APIC_SMP_DEBUG + klog() << "SMP: Send IPI from cpu #" << proc.id() << " to 
cpu #" << cpu; +#endif + ASSERT(cpu != proc.id()); + ASSERT(cpu < 8); + wait_for_pending_icr(); + write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand, 1u << cpu)); +} + +void APICIPIInterruptHandler::handle_interrupt(const RegisterState&) +{ +#ifdef APIC_SMP_DEBUG + klog() << "APIC IPI on cpu #" << Processor::current().id(); +#endif +} + +bool APICIPIInterruptHandler::eoi() +{ +#ifdef APIC_SMP_DEBUG + klog() << "SMP: IPI eoi"; +#endif + APIC::the().eoi(); + return true; +} + +void APICErrInterruptHandler::handle_interrupt(const RegisterState&) +{ + klog() << "APIC: SMP error on cpu #" << Processor::current().id(); +} + +bool APICErrInterruptHandler::eoi() +{ + APIC::the().eoi(); + return true; +} + } diff --git a/Kernel/Interrupts/APIC.h b/Kernel/Interrupts/APIC.h index 17a2496fe8..f845c321ab 100644 --- a/Kernel/Interrupts/APIC.h +++ b/Kernel/Interrupts/APIC.h @@ -27,11 +27,14 @@ #pragma once #include <AK/Types.h> -#include <AK/NonnullOwnPtrVector.h> #include <Kernel/VM/MemoryManager.h> namespace Kernel { +struct LocalAPIC { + u32 apic_id; +}; + class APIC { public: static APIC& the(); @@ -39,14 +42,20 @@ public: static bool initialized(); bool init_bsp(); - void enable_bsp(); void eoi(); + void boot_aps(); void enable(u32 cpu); + void init_finished(u32 cpu); + void broadcast_ipi(); + void send_ipi(u32 cpu); static u8 spurious_interrupt_vector(); + Thread* get_idle_thread(u32 cpu) const; + u32 enabled_processor_count() const { return m_processor_enabled_cnt; } private: class ICRReg { - u32 m_reg { 0 }; + u32 m_low { 0 }; + u32 m_high { 0 }; public: enum DeliveryMode { @@ -76,25 +85,34 @@ private: AllExcludingSelf = 0x3, }; - ICRReg(u8 vector, DeliveryMode delivery_mode, DestinationMode destination_mode, Level level, TriggerMode trigger_mode, DestinationShorthand destination) - : m_reg(vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | 
(static_cast<u32>(trigger_mode) << 15) | (destination << 18)) + ICRReg(u8 vector, DeliveryMode delivery_mode, DestinationMode destination_mode, Level level, TriggerMode trigger_mode, DestinationShorthand destinationShort, u8 destination = 0) + : m_low(vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destinationShort << 18)), + m_high((u32)destination << 24) { } - u32 low() const { return m_reg; } - u32 high() const { return 0; } + u32 low() const { return m_low; } + u32 high() const { return m_high; } }; OwnPtr<Region> m_apic_base; - NonnullOwnPtrVector<Region> m_apic_ap_stacks; - Vector<Processor> m_ap_processor_info; - AK::Atomic<u32> m_apic_ap_count{0}; + Vector<OwnPtr<Region>> m_apic_ap_stacks; + Vector<OwnPtr<Processor>> m_ap_processor_info; + Vector<Thread*> m_ap_idle_threads; + AK::Atomic<u8> m_apic_ap_count{0}; + AK::Atomic<u8> m_apic_ap_continue{0}; + u32 m_processor_cnt{0}; + u32 m_processor_enabled_cnt{0}; static PhysicalAddress get_base(); static void set_base(const PhysicalAddress& base); void write_register(u32 offset, u32 value); u32 read_register(u32 offset); + void set_lvt(u32 offset, u8 interrupt); + void set_siv(u32 offset, u8 interrupt); + void wait_for_pending_icr(); void write_icr(const ICRReg& icr); + void do_boot_aps(); }; } diff --git a/Kernel/Interrupts/GenericInterruptHandler.cpp b/Kernel/Interrupts/GenericInterruptHandler.cpp index 12f078d811..2101c705e8 100644 --- a/Kernel/Interrupts/GenericInterruptHandler.cpp +++ b/Kernel/Interrupts/GenericInterruptHandler.cpp @@ -36,20 +36,28 @@ GenericInterruptHandler& GenericInterruptHandler::from(u8 interrupt_number) return get_interrupt_handler(interrupt_number); } -GenericInterruptHandler::GenericInterruptHandler(u8 interrupt_number) - : m_interrupt_number(interrupt_number) +GenericInterruptHandler::GenericInterruptHandler(u8 interrupt_number, bool disable_remap) + : m_interrupt_number(interrupt_number), + 
m_disable_remap(disable_remap) { - register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this); + if (m_disable_remap) + register_generic_interrupt_handler(m_interrupt_number, *this); + else + register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this); } GenericInterruptHandler::~GenericInterruptHandler() { - unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this); + if (m_disable_remap) + unregister_generic_interrupt_handler(m_interrupt_number, *this); + else + unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this); } void GenericInterruptHandler::change_interrupt_number(u8 number) { ASSERT_INTERRUPTS_ENABLED(); + ASSERT(!m_disable_remap); unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(interrupt_number()), *this); m_interrupt_number = number; register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(interrupt_number()), *this); diff --git a/Kernel/Interrupts/GenericInterruptHandler.h b/Kernel/Interrupts/GenericInterruptHandler.h index 61cabf10c1..f479ee3065 100644 --- a/Kernel/Interrupts/GenericInterruptHandler.h +++ b/Kernel/Interrupts/GenericInterruptHandler.h @@ -65,11 +65,12 @@ public: protected: void change_interrupt_number(u8 number); - explicit GenericInterruptHandler(u8 interrupt_number); + explicit GenericInterruptHandler(u8 interrupt_number, bool disable_remap = false); private: size_t m_invoking_count { 0 }; bool m_enabled { false }; u8 m_interrupt_number { 0 }; + bool m_disable_remap { false }; }; } diff --git a/Kernel/Interrupts/InterruptManagement.cpp b/Kernel/Interrupts/InterruptManagement.cpp index 058b45dcd3..e368c811d9 100644 --- a/Kernel/Interrupts/InterruptManagement.cpp +++ b/Kernel/Interrupts/InterruptManagement.cpp @@ -188,11 +188,12 @@ 
void InterruptManagement::switch_to_ioapic_mode() dbg() << "Interrupts: Detected " << irq_controller->model(); } } - APIC::the().init_bsp(); if (auto mp_parser = MultiProcessorParser::autodetect()) { m_pci_interrupt_overrides = mp_parser->get_pci_interrupt_redirections(); } + + APIC::the().init_bsp(); } void InterruptManagement::locate_apic_data() diff --git a/Kernel/Interrupts/UnhandledInterruptHandler.cpp b/Kernel/Interrupts/UnhandledInterruptHandler.cpp index 077e4db0ba..a9a6b63cb0 100644 --- a/Kernel/Interrupts/UnhandledInterruptHandler.cpp +++ b/Kernel/Interrupts/UnhandledInterruptHandler.cpp @@ -35,13 +35,13 @@ UnhandledInterruptHandler::UnhandledInterruptHandler(u8 interrupt_vector) void UnhandledInterruptHandler::handle_interrupt(const RegisterState&) { dbg() << "Interrupt: Unhandled vector " << interrupt_number() << " was invoked for handle_interrupt(RegisterState&)."; - hang(); + Processor::halt(); } [[noreturn]] bool UnhandledInterruptHandler::eoi() { dbg() << "Interrupt: Unhandled vector " << interrupt_number() << " was invoked for eoi()."; - hang(); + Processor::halt(); } UnhandledInterruptHandler::~UnhandledInterruptHandler() |