Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/ACPI/Definitions.h                    8
-rw-r--r--  Kernel/Arch/i386/Boot/boot.S               169
-rw-r--r--  Kernel/Arch/i386/CPU.cpp                    32
-rw-r--r--  Kernel/Arch/i386/CPU.h                      10
-rw-r--r--  Kernel/Interrupts/APIC.cpp                 269
-rw-r--r--  Kernel/Interrupts/APIC.h                    71
-rw-r--r--  Kernel/Interrupts/IOAPIC.cpp                 2
-rw-r--r--  Kernel/Interrupts/InterruptManagement.cpp    3
-rw-r--r--  Kernel/init.cpp                             21
9 files changed, 470 insertions(+), 115 deletions(-)
diff --git a/Kernel/ACPI/Definitions.h b/Kernel/ACPI/Definitions.h
index f386dc48b5..899781f560 100644
--- a/Kernel/ACPI/Definitions.h
+++ b/Kernel/ACPI/Definitions.h
@@ -290,6 +290,14 @@ struct [[gnu::packed]] IOAPIC
u32 gsi_base;
};
+struct [[gnu::packed]] ProcessorLocalAPIC
+{
+ MADTEntryHeader h;
+ u8 acpi_processor_id;
+ u8 apic_id;
+ u32 flags;
+};
+
struct [[gnu::packed]] InterruptSourceOverride
{
MADTEntryHeader h;
diff --git a/Kernel/Arch/i386/Boot/boot.S b/Kernel/Arch/i386/Boot/boot.S
index f52db9d726..8c8ea06a8d 100644
--- a/Kernel/Arch/i386/Boot/boot.S
+++ b/Kernel/Arch/i386/Boot/boot.S
@@ -218,7 +218,6 @@ start:
pushl $exit_message
call kprintf
add $4, %esp
-
cli
loop:
@@ -227,3 +226,171 @@ loop:
exit_message:
.asciz "Kernel exited."
+
+.extern init_ap
+.type init_ap, @function
+
+/*
+ The apic_ap_start function is copied to P0x00008000, where the APIC
+ boots each AP in real mode. This code also contains space for
+ special variables that *must* remain here. When initializing the
+ APIC, the code below gets copied to P0x00008000, the variables in it
+ get populated, and then the boot of the APs is triggered. Keeping the
+ variables here lets us access them from real mode, and keeping the
+ code position-independent avoids the need for relocation entries.
+
+ In short: the variables between apic_ap_start and end_apic_ap_start
+ *MUST* remain here and cannot be moved into .bss or any other section.
+*/
+.global apic_ap_start
+.type apic_ap_start, @function
+apic_ap_start:
+.code16
+ cli
+ jmp $0x800, $(1f - apic_ap_start) /* avoid relocation entries */
+1:
+ mov %cs, %ax
+ mov %ax, %ds
+
+ /* Generate a new processor id. This is not the APIC id. We just
+ need a way to find ourselves a stack without stomping on other
+ APs that may be doing this concurrently. */
+ xor %ax, %ax
+ mov %ax, %bp
+ inc %ax
+ lock; xaddw %ax, %ds:(ap_cpu_id - apic_ap_start)(%bp) /* avoid relocation entries */
+ mov %ax, %bx
+
+ xor %ax, %ax
+ mov %ax, %sp
+
+ /* load the first temporary gdt */
+ lgdt (ap_cpu_gdtr_initial - apic_ap_start)
+
+ /* enable PM */
+ movl %cr0, %eax
+ orl $1, %eax
+ movl %eax, %cr0
+
+ ljmpl $8, $(apic_ap_start32 - apic_ap_start + 0x8000)
+apic_ap_start32:
+.code32
+ mov $0x10, %ax
+ mov %ax, %ss
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+
+ movl $0x8000, %ebp
+
+ /* find our allocated stack based on the generated id */
+ andl $0x0000FFFF, %ebx /* mask to the 16-bit generated id */
+ movl %ebx, %esi
+ movl (ap_cpu_init_stacks - apic_ap_start)(%ebp, %ebx, 4), %esp
+
+ /* check if we support NX and enable it if we do */
+ movl $0x80000001, %eax
+ cpuid
+ testl $0x100000, %edx
+ je (1f - apic_ap_start + 0x8000)
+ /* turn on IA32_EFER.NXE */
+ movl $0xc0000080, %ecx
+ rdmsr
+ orl $0x800, %eax
+ wrmsr
+1:
+
+ /* load the bsp's cr3 value */
+ movl (ap_cpu_init_cr3 - apic_ap_start)(%ebp), %eax
+ movl %eax, %cr3
+
+ /* enable PAE + PSE */
+ movl %cr4, %eax
+ orl $0x60, %eax
+ movl %eax, %cr4
+
+ /* enable PG */
+ movl %cr0, %eax
+ orl $0x80000000, %eax
+ movl %eax, %cr0
+
+ /* load a second temporary gdt that points above 3GB */
+ lgdt (ap_cpu_gdtr_initial2 - apic_ap_start + 0xc0008000)
+
+ /* jump above 3GB into our identity mapped area now */
+ ljmp $8, $(1f - apic_ap_start + 0xc0008000)
+1:
+ /* flush the TLB */
+ movl %cr3, %eax
+ movl %eax, %cr3
+
+ movl $0xc0008000, %ebp
+
+ /* now load the final gdt and idt from the identity mapped area */
+ movl (ap_cpu_gdtr - apic_ap_start)(%ebp), %eax
+ lgdt (%eax)
+ movl (ap_cpu_idtr - apic_ap_start)(%ebp), %eax
+ lidt (%eax)
+
+ /* set same cr0 and cr4 values as the BSP */
+ movl (ap_cpu_init_cr0 - apic_ap_start)(%ebp), %eax
+ movl %eax, %cr0
+ movl (ap_cpu_init_cr4 - apic_ap_start)(%ebp), %eax
+ movl %eax, %cr4
+
+ xor %ebp, %ebp
+ cld
+
+ /* push the arbitrary cpu id (0 represents the BSP) and call into C++ */
+ inc %esi
+ push %esi
+ /* We are in the identity-mapped P0x8000 area and the BSP will unload
+ this code once all APs are initialized, so jump to init_ap with our
+ infinite loop as the return address */
+ push $loop
+ ljmp $8, $init_ap
+
+.align 4
+.global apic_ap_start_size
+apic_ap_start_size:
+ .2byte end_apic_ap_start - apic_ap_start
+ap_cpu_id:
+ .2byte 0x0
+ap_cpu_gdt:
+ /* null */
+ .8byte 0x0
+ /* code */
+ .4byte 0x0000FFFF
+ .4byte 0x00cf9a00
+ /* data */
+ .4byte 0x0000FFFF
+ .4byte 0x00cf9200
+ap_cpu_gdt_end:
+ap_cpu_gdtr_initial:
+ .2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
+ .4byte (ap_cpu_gdt - apic_ap_start) + 0x8000
+ap_cpu_gdtr_initial2:
+ .2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
+ .4byte (ap_cpu_gdt - apic_ap_start) + 0xc0008000
+.global ap_cpu_gdtr
+ap_cpu_gdtr:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_idtr
+ap_cpu_idtr:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_cr0
+ap_cpu_init_cr0:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_cr3
+ap_cpu_init_cr3:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_cr4
+ap_cpu_init_cr4:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_stacks
+ap_cpu_init_stacks:
+ /* array of allocated stack pointers */
+ /* NOTE: ap_cpu_init_stacks must be the last variable before
+ end_apic_ap_start! */
+.set end_apic_ap_start, .
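The trampoline above is deliberately position-independent: every variable between apic_ap_start and end_apic_ap_start lives at a fixed offset from apic_ap_start, so its address inside the copy is simply that offset plus the 0x8000 base. A minimal C++ sketch of that address computation (illustrative only; the patch itself expresses the same idea with the APIC_INIT_VAR_PTR macro in APIC.cpp below, and trampoline_var is a hypothetical helper name):

    // Illustrative sketch: locate a trampoline variable inside the copy at 0x8000.
    // 'var' must be a symbol placed between apic_ap_start and end_apic_ap_start.
    static volatile u32* trampoline_var(u8* copy_base, void* var)
    {
        auto offset = reinterpret_cast<FlatPtr>(var) - reinterpret_cast<FlatPtr>(&apic_ap_start);
        return reinterpret_cast<volatile u32*>(copy_base + offset);
    }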
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index affd7705d1..3fce021fcb 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -46,12 +46,6 @@
namespace Kernel {
-struct [[gnu::packed]] DescriptorTablePointer
-{
- u16 limit;
- void* address;
-};
-
static DescriptorTablePointer s_idtr;
static DescriptorTablePointer s_gdtr;
static Descriptor s_idt[256];
@@ -391,6 +385,16 @@ void flush_gdt()
: "memory");
}
+const DescriptorTablePointer& get_gdtr()
+{
+ return s_gdtr;
+}
+
+const DescriptorTablePointer& get_idtr()
+{
+ return s_idtr;
+}
+
void gdt_init()
{
s_gdt_length = 5;
@@ -819,6 +823,14 @@ void cpu_setup()
}
}
+u32 read_cr0()
+{
+ u32 cr0;
+ asm("movl %%cr0, %%eax"
+ : "=a"(cr0));
+ return cr0;
+}
+
u32 read_cr3()
{
u32 cr3;
@@ -833,6 +845,14 @@ void write_cr3(u32 cr3)
: "memory");
}
+u32 read_cr4()
+{
+ u32 cr4;
+ asm("movl %%cr4, %%eax"
+ : "=a"(cr4));
+ return cr4;
+}
+
u32 read_dr6()
{
u32 dr6;
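The new read_cr0()/read_cr4() accessors exist so the BSP can snapshot its control-register state for the trampoline to replay on each AP (see ap_cpu_init_cr0/ap_cpu_init_cr4 in boot.S above). A minimal usage sketch, assuming it runs on the BSP:

    // Sketch: capture the BSP's control bits (PE/PG/WP in CR0, PAE/PSE in CR4)
    // so the APs come up with the same paging configuration.
    u32 bsp_cr0 = read_cr0();
    u32 bsp_cr4 = read_cr4();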
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 9b6e771be0..b59819bc3f 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -41,6 +41,12 @@ class MemoryManager;
class PageDirectory;
class PageTableEntry;
+struct [[gnu::packed]] DescriptorTablePointer
+{
+ u16 limit;
+ void* address;
+};
+
struct [[gnu::packed]] TSS32
{
u16 backlink, __blh;
@@ -248,6 +254,8 @@ public:
class GenericInterruptHandler;
struct RegisterState;
+const DescriptorTablePointer& get_gdtr();
+const DescriptorTablePointer& get_idtr();
void gdt_init();
void idt_init();
void sse_init();
@@ -477,8 +485,10 @@ inline FlatPtr offset_in_page(const void* address)
return offset_in_page((FlatPtr)address);
}
+u32 read_cr0();
u32 read_cr3();
void write_cr3(u32);
+u32 read_cr4();
u32 read_dr6();
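Moving DescriptorTablePointer into CPU.h lets APIC.cpp hand the BSP's table pointers to the APs via get_gdtr()/get_idtr(). The struct mirrors the 6-byte memory operand of lgdt/lidt: a 16-bit limit followed by a 32-bit linear base. A minimal sketch (my_gdt is a hypothetical table, not from the patch):

    // Illustrative: how a DescriptorTablePointer feeds lgdt.
    Descriptor my_gdt[5]; // hypothetical 5-entry GDT
    DescriptorTablePointer gdtr { sizeof(my_gdt) - 1, my_gdt };
    asm volatile("lgdt %0" ::"m"(gdtr) : "memory");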
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index b9ee632540..8498b20289 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -25,13 +25,17 @@
*/
#include <AK/Assertions.h>
+#include <AK/Memory.h>
#include <AK/StringView.h>
#include <AK/Types.h>
+#include <Kernel/ACPI/Parser.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
+#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/TypedMapping.h>
#define IRQ_APIC_SPURIOUS 0x7f
@@ -54,51 +58,28 @@
namespace Kernel {
-namespace APIC {
-
-class ICRReg {
- u32 m_reg { 0 };
-
-public:
- enum DeliveryMode {
- Fixed = 0x0,
- LowPriority = 0x1,
- SMI = 0x2,
- NMI = 0x4,
- INIT = 0x5,
- StartUp = 0x6,
- };
- enum DestinationMode {
- Physical = 0x0,
- Logical = 0x0,
- };
- enum Level {
- DeAssert = 0x0,
- Assert = 0x1
- };
- enum class TriggerMode {
- Edge = 0x0,
- Level = 0x1,
- };
- enum DestinationShorthand {
- NoShorthand = 0x0,
- Self = 0x1,
- AllIncludingSelf = 0x2,
- AllExcludingSelf = 0x3,
- };
-
- ICRReg(u8 vector, DeliveryMode delivery_mode, DestinationMode destination_mode, Level level, TriggerMode trigger_mode, DestinationShorthand destination)
- : m_reg(vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destination << 18))
- {
- }
+static APIC* s_apic;
- u32 low() const { return m_reg; }
- u32 high() const { return 0; }
-};
+bool APIC::initialized()
+{
+ return (s_apic != nullptr);
+}
-static PhysicalAddress g_apic_base;
+APIC& APIC::the()
+{
+ ASSERT(APIC::initialized());
+ return *s_apic;
+}
-static PhysicalAddress get_base()
+void APIC::initialize()
+{
+ ASSERT(!APIC::initialized());
+ s_apic = new APIC();
+}
+
+PhysicalAddress APIC::get_base()
{
u32 lo, hi;
MSR msr(APIC_BASE_MSR);
@@ -106,7 +87,7 @@ static PhysicalAddress get_base()
return PhysicalAddress(lo & 0xfffff000);
}
-static void set_base(const PhysicalAddress& base)
+void APIC::set_base(const PhysicalAddress& base)
{
u32 hi = 0;
u32 lo = base.get() | 0x800;
@@ -114,17 +95,17 @@ static void set_base(const PhysicalAddress& base)
msr.set(lo, hi);
}
-static void write_register(u32 offset, u32 value)
+void APIC::write_register(u32 offset, u32 value)
{
- *map_typed_writable<u32>(g_apic_base.offset(offset)) = value;
+ *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr()) = value;
}
-static u32 read_register(u32 offset)
+u32 APIC::read_register(u32 offset)
{
- return *map_typed<u32>(g_apic_base.offset(offset));
+ return *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr());
}
-static void write_icr(const ICRReg& icr)
+void APIC::write_icr(const ICRReg& icr)
{
write_register(APIC_REG_ICR_HIGH, icr.high());
write_register(APIC_REG_ICR_LOW, icr.low());
@@ -134,32 +115,31 @@ static void write_icr(const ICRReg& icr)
#define APIC_LVT_TRIGGER_LEVEL (1 << 14)
#define APIC_LVT(iv, dm) ((iv & 0xff) | ((dm & 0x7) << 8))
-asm(
- ".globl apic_ap_start \n"
- ".type apic_ap_start, @function \n"
- "apic_ap_start: \n"
- ".set begin_apic_ap_start, . \n"
- " jmp apic_ap_start\n" // TODO: implement
- ".set end_apic_ap_start, . \n"
- "\n"
- ".globl apic_ap_start_size \n"
- "apic_ap_start_size: \n"
- ".word end_apic_ap_start - begin_apic_ap_start \n");
-
extern "C" void apic_ap_start(void);
extern "C" u16 apic_ap_start_size;
-
-void eoi()
+extern "C" u32 ap_cpu_init_stacks;
+extern "C" u32 ap_cpu_init_cr0;
+extern "C" u32 ap_cpu_init_cr3;
+extern "C" u32 ap_cpu_init_cr4;
+extern "C" u32 ap_cpu_gdtr;
+extern "C" u32 ap_cpu_idtr;
+
+void APIC::eoi()
{
write_register(APIC_REG_EOI, 0x0);
}
-u8 spurious_interrupt_vector()
+u8 APIC::spurious_interrupt_vector()
{
return IRQ_APIC_SPURIOUS;
}
-bool init()
+#define APIC_INIT_VAR_PTR(tpe,vaddr,varname) \
+ reinterpret_cast<volatile tpe*>(reinterpret_cast<ptrdiff_t>(vaddr) \
+ + reinterpret_cast<ptrdiff_t>(&varname) \
+ - reinterpret_cast<ptrdiff_t>(&apic_ap_start))
+
+bool APIC::init_bsp()
{
// FIXME: Use the ACPI MADT table
if (!MSR::have())
@@ -174,44 +154,88 @@ bool init()
klog() << "Initializing APIC, base: " << apic_base;
set_base(apic_base);
- g_apic_base = apic_base;
-
- return true;
-}
+ m_apic_base = MM.allocate_kernel_region(apic_base.page_base(), PAGE_ROUND_UP(1), {}, Region::Access::Read | Region::Access::Write);
-void enable_bsp()
-{
- // FIXME: Ensure this method can only be executed by the BSP.
- enable(0);
-}
-
-void enable(u32 cpu)
-{
- klog() << "Enabling local APIC for cpu #" << cpu;
-
- // dummy read, apparently to avoid a bug in old CPUs.
- read_register(APIC_REG_SIV);
- // set spurious interrupt vector
- write_register(APIC_REG_SIV, (IRQ_APIC_SPURIOUS + IRQ_VECTOR_BASE) | 0x100);
-
- // local destination mode (flat mode)
- write_register(APIC_REG_DF, 0xf0000000);
-
- // set destination id (note that this limits it to 8 cpus)
- write_register(APIC_REG_LD, 0);
-
- SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);
+ auto rsdp = ACPI::StaticParsing::find_rsdp();
+ if (!rsdp.has_value()) {
+ klog() << "APIC: RSDP not found";
+ return false;
+ }
+ auto madt_address = ACPI::StaticParsing::find_table(rsdp.value(), "APIC");
+ if (madt_address.is_null()) {
+ klog() << "APIC: MADT table not found";
+ return false;
+ }
- write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
- write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
- write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
- write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
- write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);
- write_register(APIC_REG_LVT_ERR, APIC_LVT(0, 0) | APIC_LVT_MASKED);
+ u32 processor_cnt = 0;
+ u32 processor_enabled_cnt = 0;
+ auto madt = map_typed<ACPI::Structures::MADT>(madt_address);
+ size_t entry_index = 0;
+ size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
+ auto* madt_entry = madt->entries;
+ while (entries_length > 0) {
+ size_t entry_length = madt_entry->length;
+ if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::LocalAPIC) {
+ auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry;
+ klog() << "APIC: AP found @ MADT entry " << entry_index << ", Processor Id: " << String::format("%02x", plapic_entry->acpi_processor_id)
+ << " APIC Id: " << String::format("%02x", plapic_entry->apic_id) << " Flags: " << String::format("%08x", plapic_entry->flags);
+ processor_cnt++;
+ if ((plapic_entry->flags & 0x1) != 0)
+ processor_enabled_cnt++;
+ }
+ madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
+ entries_length -= entry_length;
+ entry_index++;
+ }
+
+ if (processor_enabled_cnt < 1)
+ processor_enabled_cnt = 1;
+ if (processor_cnt < 1)
+ processor_cnt = 1;
+
+ klog() << "APIC Processors found: " << processor_cnt << ", enabled: " << processor_enabled_cnt;
+
+ enable_bsp();
+
+ if (processor_enabled_cnt > 1) {
+ u32 aps_to_enable = processor_enabled_cnt - 1;
+
+ // Copy the APIC startup code and variables to P0x00008000
+ auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), PAGE_ROUND_UP(apic_ap_start_size), {}, Region::Access::Read | Region::Access::Write | Region::Access::Execute);
+ memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
+
+ // Allocate enough stacks for all APs
+ for (u32 i = 0; i < aps_to_enable; i++) {
+ auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, true, true);
+ if (!stack_region) {
+ klog() << "APIC: Failed to allocate stack for AP #" << i;
+ return false;
+ }
+ stack_region->set_stack(true);
+ klog() << "APIC: Allocated AP #" << i << " stack at " << stack_region->vaddr();
+ m_apic_ap_stacks.append(stack_region.release_nonnull());
+ }
- write_register(APIC_REG_TPR, 0);
+ // Store pointers to all stacks for the APs to use
+ auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
+ for (size_t i = 0; i < m_apic_ap_stacks.size(); i++)
+ ap_stack_array[i] = m_apic_ap_stacks[i].vaddr().get() + Thread::default_kernel_stack_size;
+
+ // Store the BSP's CR3 value for the APs to use
+ *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr3) = MM.kernel_page_directory().cr3();
+
+ // Store the BSP's GDT and IDT for the APs to use
+ const auto& gdtr = get_gdtr();
+ *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_gdtr) = FlatPtr(&gdtr);
+ const auto& idtr = get_idtr();
+ *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_idtr) = FlatPtr(&idtr);
+
+ // Store the BSP's CR0 and CR4 values for the APs to use
+ *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr0) = read_cr0();
+ *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr4) = read_cr4();
+
+ klog() << "APIC: Starting " << aps_to_enable << " AP(s)";
- if (cpu != 0) {
// INIT
write_icr(ICRReg(0, ICRReg::INIT, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
@@ -223,9 +247,58 @@ void enable(u32 cpu)
IO::delay(200);
}
+
+ // Now wait until m_apic_ap_count reaches the number of APs we started, which means all APs are initialized and no longer need these special mappings
+ if (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable) {
+ klog() << "APIC: Waiting for " << aps_to_enable << " AP(s) to finish initialization...";
+ do {
+ // Wait a little bit
+ IO::delay(200);
+ } while (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable);
+ }
+
+ klog() << "APIC: " << processor_enabled_cnt << " processors are initialized and running";
}
+ return true;
+}
+
+void APIC::enable_bsp()
+{
+ // FIXME: Ensure this method can only be executed by the BSP.
+ enable(0);
}
+void APIC::enable(u32 cpu)
+{
+ if (cpu == 0) // FIXME: once memory management can deal with it, re-enable for all
+ klog() << "Enabling local APIC for cpu #" << cpu;
+
+ if (cpu == 0) {
+ // dummy read, apparently to avoid a bug in old CPUs.
+ read_register(APIC_REG_SIV);
+ // set spurious interrupt vector
+ write_register(APIC_REG_SIV, (IRQ_APIC_SPURIOUS + IRQ_VECTOR_BASE) | 0x100);
+
+ // local destination mode (flat mode)
+ write_register(APIC_REG_DF, 0xf0000000);
+
+ // set destination id (note that this limits it to 8 cpus)
+ write_register(APIC_REG_LD, 0);
+
+ SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);
+
+ write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
+ write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
+ write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
+ write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
+ write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);
+ write_register(APIC_REG_LVT_ERR, APIC_LVT(0, 0) | APIC_LVT_MASKED);
+
+ write_register(APIC_REG_TPR, 0);
+ } else {
+ // Notify the BSP that we are done initializing. It will unmap the startup data at P0x8000
+ m_apic_ap_count++;
+ }
}
}
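The hunk above only shows the INIT IPI; the two Startup IPIs sit in unchanged context lines. For orientation, here is a sketch of the full INIT-SIPI-SIPI handshake this code performs; the delay values are assumptions based on the common MP startup recipe, not taken from the patch. The SIPI vector is 0x08 because a startup vector selects a 4KiB page, and the trampoline sits at physical 0x8000 (0x8000 >> 12 = 0x08):

    // Sketch of the INIT-SIPI-SIPI handshake (delays are assumptions).
    write_icr(ICRReg(0, ICRReg::INIT, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
    IO::delay(10 * 1000); // give the APs ~10ms to complete their reset
    for (int i = 0; i < 2; ++i) {
        // Vector 0x08 => each AP starts executing at physical 0x8000 in real mode.
        write_icr(ICRReg(0x08, ICRReg::StartUp, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
        IO::delay(200); // ~200us between the two SIPIs
    }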
diff --git a/Kernel/Interrupts/APIC.h b/Kernel/Interrupts/APIC.h
index 4db0bf57d8..cce63ca57e 100644
--- a/Kernel/Interrupts/APIC.h
+++ b/Kernel/Interrupts/APIC.h
@@ -27,16 +27,73 @@
#pragma once
#include <AK/Types.h>
+#include <AK/NonnullOwnPtrVector.h>
+#include <Kernel/VM/MemoryManager.h>
namespace Kernel {
-namespace APIC {
+class APIC {
+public:
+ static APIC& the();
+ static void initialize();
+ static bool initialized();
-void enable_bsp();
-void eoi();
-bool init();
-void enable(u32 cpu);
-u8 spurious_interrupt_vector();
-}
+ bool init_bsp();
+ void enable_bsp();
+ void eoi();
+ void enable(u32 cpu);
+ static u8 spurious_interrupt_vector();
+
+private:
+ class ICRReg {
+ u32 m_reg { 0 };
+
+ public:
+ enum DeliveryMode {
+ Fixed = 0x0,
+ LowPriority = 0x1,
+ SMI = 0x2,
+ NMI = 0x4,
+ INIT = 0x5,
+ StartUp = 0x6,
+ };
+ enum DestinationMode {
+ Physical = 0x0,
+ Logical = 0x1,
+ };
+ enum Level {
+ DeAssert = 0x0,
+ Assert = 0x1
+ };
+ enum class TriggerMode {
+ Edge = 0x0,
+ Level = 0x1,
+ };
+ enum DestinationShorthand {
+ NoShorthand = 0x0,
+ Self = 0x1,
+ AllIncludingSelf = 0x2,
+ AllExcludingSelf = 0x3,
+ };
+
+ ICRReg(u8 vector, DeliveryMode delivery_mode, DestinationMode destination_mode, Level level, TriggerMode trigger_mode, DestinationShorthand destination)
+ : m_reg(vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destination << 18))
+ {
+ }
+
+ u32 low() const { return m_reg; }
+ u32 high() const { return 0; }
+ };
+
+ OwnPtr<Region> m_apic_base;
+ NonnullOwnPtrVector<Region> m_apic_ap_stacks;
+ AK::Atomic<u32> m_apic_ap_count{0};
+
+ static PhysicalAddress get_base();
+ static void set_base(const PhysicalAddress& base);
+ void write_register(u32 offset, u32 value);
+ u32 read_register(u32 offset);
+ void write_icr(const ICRReg& icr);
+};
}
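As a worked check of the ICRReg constructor's bit packing (not part of the patch): a Startup IPI with vector 0x08 sent to all CPUs except self encodes as

    // 0x08 | (StartUp=6 << 8) | (Physical=0 << 11) | (Assert=1 << 14)
    //      | (Edge=0 << 15) | (AllExcludingSelf=3 << 18)
    constexpr u32 sipi_low = 0x08 | (0x6 << 8) | (0x1 << 14) | (0x3 << 18);
    static_assert(sipi_low == 0x000C4608, "low ICR word written to APIC_REG_ICR_LOW");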
diff --git a/Kernel/Interrupts/IOAPIC.cpp b/Kernel/Interrupts/IOAPIC.cpp
index e2b9641ccd..c2a5a5c86a 100644
--- a/Kernel/Interrupts/IOAPIC.cpp
+++ b/Kernel/Interrupts/IOAPIC.cpp
@@ -297,7 +297,7 @@ void IOAPIC::eoi(const GenericInterruptHandler& handler) const
ASSERT(!is_hard_disabled());
ASSERT(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
ASSERT(handler.type() != HandlerType::SpuriousInterruptHandler);
- APIC::eoi();
+ APIC::the().eoi();
}
u16 IOAPIC::get_isr() const
diff --git a/Kernel/Interrupts/InterruptManagement.cpp b/Kernel/Interrupts/InterruptManagement.cpp
index ac39cc576e..db24edf8bd 100644
--- a/Kernel/Interrupts/InterruptManagement.cpp
+++ b/Kernel/Interrupts/InterruptManagement.cpp
@@ -188,8 +188,7 @@ void InterruptManagement::switch_to_ioapic_mode()
dbg() << "Interrupts: Detected " << irq_controller->model();
}
}
- APIC::init();
- APIC::enable_bsp();
+ APIC::the().init_bsp();
if (auto mp_parser = MultiProcessorParser::autodetect()) {
m_pci_interrupt_overrides = mp_parser->get_pci_interrupt_redirections();
diff --git a/Kernel/init.cpp b/Kernel/init.cpp
index 9909b20ec2..0dacc83a6a 100644
--- a/Kernel/init.cpp
+++ b/Kernel/init.cpp
@@ -121,6 +121,7 @@ extern "C" [[noreturn]] void init()
for (ctor_func_t* ctor = &start_ctors; ctor < &end_ctors; ctor++)
(*ctor)();
+ APIC::initialize();
InterruptManagement::initialize();
ACPI::initialize();
@@ -161,6 +162,26 @@ extern "C" [[noreturn]] void init()
ASSERT_NOT_REACHED();
}
+//
+// This is where C++ execution begins for APs, after boot.S transfers control here.
+//
+// The purpose of init_ap() is to initialize APs for multi-tasking.
+//
+extern "C" [[noreturn]] void init_ap(u32 cpu)
+{
+ APIC::the().enable(cpu);
+
+#if 0
+ Scheduler::idle_loop();
+#else
+ // FIXME: remove once the scheduler can handle APs
+ cli();
+ for (;;)
+ asm volatile("hlt");
+#endif
+ ASSERT_NOT_REACHED();
+}
+
void init_stage2()
{
SyncTask::spawn();
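For reference, the entry contract that boot.S establishes before the ljmp into init_ap, summarized as a sketch (the declaration matches the patch; the comments are a reading of the assembly above):

    // When init_ap(cpu) begins executing:
    //  - %esp points at the top of this AP's stack (ap_cpu_init_stacks[id]),
    //  - 'cpu' is the AP's 1-based index (0 is reserved for the BSP),
    //  - the return address on the stack is boot.S's 'loop' label, so returning
    //    would park the AP in the kernel's halt loop instead of running off the end.
    extern "C" [[noreturn]] void init_ap(u32 cpu);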