author    | Idan Horowitz <idan.horowitz@gmail.com> | 2021-12-19 18:04:55 +0200
committer | Brian Gianforcaro <b.gianfo@gmail.com> | 2021-12-22 00:02:36 -0800
commit    | fd3be7ffccc18b92dace3e8f26489b99e0c4cce7
tree      | 82221007149e0712dd7682e291a718f590c442d4 /Kernel/Interrupts/APIC.cpp
parent    | 6c8f1e62db181e2cdede167b0d7353f0eebfbc5c
Kernel: Set up the APIC AP cores' boot environment before init_stage2
Since this range is already mapped in the kernel page directory, we
can initialize it before jumping into the first kernel process, which
lets us avoid mapping the range into init_stage2's address space.
This brings us half-way to removing the shared bottom 2 MiB mapping in
every process, leaving only the Prekernel.
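
To make the resulting boot order concrete, here is a minimal illustrative sketch. The split into setup_ap_boot_environment() and do_boot_aps() comes from this patch; the wrapper function and the APIC::the() call site shown here are assumptions for illustration only, not the actual init code touched by the diff.

```cpp
// Illustrative sketch only (assumed call sites, not actual SerenityOS init code).
//
// setup_ap_boot_environment() runs early, while the kernel page directory still
// contains the identity mapping of the low trampoline range at 0x8000, so the
// AP boot data can be written there directly. do_boot_aps() then runs later,
// from the first kernel process, without needing that range mapped into its
// own address space.
static void illustrate_ap_bringup_order()
{
    auto& apic = APIC::the(); // assumed singleton accessor

    // Before jumping into the first kernel process:
    apic.setup_ap_boot_environment(); // copy the trampoline to 0x8000, allocate temporary AP stacks

    // ... enter the first kernel process (init_stage2) ...

    // Later, from init_stage2:
    apic.do_boot_aps(); // start the APs, then unmap the trampoline and free the temporary stacks
}
```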
Diffstat (limited to 'Kernel/Interrupts/APIC.cpp')
-rw-r--r-- | Kernel/Interrupts/APIC.cpp | 33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index ca1a4dcd72..4efc9d007b 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -322,8 +322,9 @@ UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_reg
     return region_or_error.release_value();
 }
 
-UNMAP_AFTER_INIT void APIC::do_boot_aps()
+UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
 {
+    VERIFY(!m_ap_boot_environment);
     VERIFY(m_processor_enabled_cnt > 1);
 
     u32 aps_to_enable = m_processor_enabled_cnt - 1;
@@ -331,11 +332,12 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     // Also account for the data appended to:
     // * aps_to_enable u32 values for ap_cpu_init_stacks
     // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
-    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))));
+    constexpr u64 apic_startup_region_base = 0x8000;
+    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))));
     memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
 
     // Allocate enough stacks for all APs
-    Vector<OwnPtr<Memory::Region>> apic_ap_stacks;
+    m_ap_temporary_boot_stacks.ensure_capacity(aps_to_enable);
     for (u32 i = 0; i < aps_to_enable; i++) {
         auto stack_region_or_error = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
         if (stack_region_or_error.is_error()) {
@@ -344,14 +346,14 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
         }
         auto stack_region = stack_region_or_error.release_value();
         stack_region->set_stack(true);
-        apic_ap_stacks.append(move(stack_region));
+        m_ap_temporary_boot_stacks.unchecked_append(move(stack_region));
     }
 
     // Store pointers to all stacks for the APs to use
-    auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
-    VERIFY(aps_to_enable == apic_ap_stacks.size());
+    auto* ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
+    VERIFY(aps_to_enable == m_ap_temporary_boot_stacks.size());
     for (size_t i = 0; i < aps_to_enable; i++) {
-        ap_stack_array[i] = apic_ap_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
+        ap_stack_array[i] = m_ap_temporary_boot_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
         dbgln_if(APIC_DEBUG, "APIC: CPU[{}] stack at {}", i + 1, VirtualAddress { ap_stack_array[i] });
     }
 
@@ -359,7 +361,7 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     m_ap_processor_info.resize(aps_to_enable);
     for (size_t i = 0; i < aps_to_enable; i++)
         m_ap_processor_info[i] = make<Processor>();
-    auto ap_processor_info_array = &ap_stack_array[aps_to_enable];
+    auto* ap_processor_info_array = &ap_stack_array[aps_to_enable];
     for (size_t i = 0; i < aps_to_enable; i++) {
         ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr());
         dbgln_if(APIC_DEBUG, "APIC: CPU[{}] processor at {}", i + 1, VirtualAddress { ap_processor_info_array[i] });
@@ -379,6 +381,15 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr0) = read_cr0();
     *APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr4) = read_cr4();
 
+    m_ap_boot_environment = move(apic_startup_region);
+}
+
+UNMAP_AFTER_INIT void APIC::do_boot_aps()
+{
+    VERIFY(m_ap_boot_environment);
+    VERIFY(m_processor_enabled_cnt > 1);
+    u32 aps_to_enable = m_processor_enabled_cnt - 1;
+
     // Create an idle thread for each processor. We have to do this here
     // because we won't be able to send FlushTLB messages, so we have to
     // have all memory set up for the threads so that when the APs are
@@ -414,7 +425,11 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
 
     // NOTE: Since this region is identity-mapped, we have to unmap it manually to prevent the virtual
     // address range from leaking into the general virtual range allocator.
-    apic_startup_region->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);
+    m_ap_boot_environment->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);
+    m_ap_boot_environment = nullptr;
+    // When the APs signal that they finished their initialization they have already switched over to their
+    // idle thread's stack, so the temporary boot stack can be deallocated
+    m_ap_temporary_boot_stacks.clear();
 }
 
 UNMAP_AFTER_INIT void APIC::boot_aps()
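
The diffstat is limited to APIC.cpp, so the member declarations this change relies on are not shown. Below is a plausible sketch of the corresponding APIC.h additions, inferred purely from how the members are used above; the exact types and placement are assumptions.

```cpp
// Inferred sketch of the new members in Kernel/Interrupts/APIC.h (not part of this diff).
class APIC {
    // ...
private:
    // Identity-mapped region at physical 0x8000 holding the AP trampoline code and per-AP
    // init data; filled in setup_ap_boot_environment(), unmapped and dropped in do_boot_aps().
    OwnPtr<Memory::Region> m_ap_boot_environment;

    // Temporary kernel stacks the APs boot on; cleared in do_boot_aps() once every AP
    // has switched over to its idle thread's stack.
    Vector<OwnPtr<Memory::Region>> m_ap_temporary_boot_stacks;
    // ...
};
```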