author     Andreas Kling <kling@serenityos.org>    2021-08-06 21:35:56 +0200
committer  Andreas Kling <kling@serenityos.org>    2021-08-06 21:35:56 +0200
commit     cdab5b2091689b2f532327dc0d4203f0ba9fe90d (patch)
tree       9b0a949700bf9aa00a148b371da83e16e2a4cf52
parent     16ac3bbfd702e9155a377fe6e6bf04281ee3d290 (diff)
download   serenity-cdab5b2091689b2f532327dc0d4203f0ba9fe90d.zip
Kernel: Make identity mapping mechanism used during AP boot non-generic
When booting APs, we identity map a region at 0x8000 while doing the
initial bringup sequence. This is the only thing in the kernel that
requires an identity mapping, yet we had a bunch of generic APIs and a
dedicated VirtualRangeAllocator in every PageDirectory for this purpose.

This patch simplifies the situation by moving the identity mapping logic
to the AP boot code and removing the generic APIs.
-rw-r--r--  Kernel/Interrupts/APIC.cpp      | 20
-rw-r--r--  Kernel/Memory/MemoryManager.cpp | 13
-rw-r--r--  Kernel/Memory/MemoryManager.h   |  1
-rw-r--r--  Kernel/Memory/PageDirectory.cpp |  1
-rw-r--r--  Kernel/Memory/PageDirectory.h   |  3
-rw-r--r--  Kernel/Memory/Region.cpp        |  5
6 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index cd7f04f068..a2bfa0dd8e 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -15,6 +15,7 @@
 #include <Kernel/IO.h>
 #include <Kernel/Interrupts/APIC.h>
 #include <Kernel/Interrupts/SpuriousInterruptHandler.h>
+#include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PageDirectory.h>
 #include <Kernel/Memory/TypedMapping.h>
@@ -274,6 +275,19 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
     return true;
 }
 
+UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
+{
+    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    VERIFY(vmobject);
+    auto region = MM.allocate_kernel_region_with_vmobject(
+        Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
+        vmobject.release_nonnull(),
+        {},
+        Memory::Region::Access::Read | Memory::Region::Access::Write | Memory::Region::Access::Execute);
+    VERIFY(region);
+    return region.release_nonnull();
+}
+
 UNMAP_AFTER_INIT void APIC::do_boot_aps()
 {
     VERIFY(m_processor_enabled_cnt > 1);
@@ -283,7 +297,7 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     // Also account for the data appended to:
     // * aps_to_enable u32 values for ap_cpu_init_stacks
     // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
-    auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Memory::Region::Access::Read | Memory::Region::Access::Write | Memory::Region::Access::Execute);
+    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))));
     memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
 
     // Allocate enough stacks for all APs
@@ -362,6 +376,10 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     }
 
     dbgln_if(APIC_DEBUG, "APIC: {} processors are initialized and running", m_processor_enabled_cnt);
+
+    // NOTE: Since this region is identity-mapped, we have to unmap it manually to prevent the virtual
+    //       address range from leaking into the general virtual range allocator.
+    apic_startup_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 }
 
 UNMAP_AFTER_INIT void APIC::boot_aps()
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 0b5f5f12a2..e117aed54c 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -733,19 +733,6 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
-    auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    if (!vm_object)
-        return {};
-    VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
-    auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
-    if (!range.has_value())
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
-}
-
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 5070b6706e..bc2bbd312b 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -183,7 +183,6 @@ public:
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
diff --git a/Kernel/Memory/PageDirectory.cpp b/Kernel/Memory/PageDirectory.cpp
index 6aca487b3e..4d8cf94e88 100644
--- a/Kernel/Memory/PageDirectory.cpp
+++ b/Kernel/Memory/PageDirectory.cpp
@@ -38,7 +38,6 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
     // make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
     FlatPtr start_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
     directory->m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
-    directory->m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
     return directory;
 }
 
diff --git a/Kernel/Memory/PageDirectory.h b/Kernel/Memory/PageDirectory.h
index 98d3d94cf4..b50a2867b0 100644
--- a/Kernel/Memory/PageDirectory.h
+++ b/Kernel/Memory/PageDirectory.h
@@ -39,8 +39,6 @@ public:
     VirtualRangeAllocator& range_allocator() { return m_range_allocator; }
     VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
 
-    VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
-
     AddressSpace* address_space() { return m_space; }
     const AddressSpace* address_space() const { return m_space; }
 
@@ -53,7 +51,6 @@ private:
     AddressSpace* m_space { nullptr };
 
     VirtualRangeAllocator m_range_allocator;
-    VirtualRangeAllocator m_identity_range_allocator;
 #if ARCH(X86_64)
     RefPtr<PhysicalPage> m_pml4t;
 #endif
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 07f16d7a93..a5b5758b94 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -247,10 +247,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryVirtualRange deallocate_range)
     }
     MM.flush_tlb(m_page_directory, vaddr(), page_count());
     if (deallocate_range == ShouldDeallocateVirtualMemoryVirtualRange::Yes) {
-        if (m_page_directory->range_allocator().contains(range()))
-            m_page_directory->range_allocator().deallocate(range());
-        else
-            m_page_directory->identity_range_allocator().deallocate(range());
+        m_page_directory->range_allocator().deallocate(range());
     }
     m_page_directory = nullptr;
 }