summary | refs | log | tree | commit | diff
path: root/Kernel/Memory
diff options
context:
space:
mode:
author: Daniel Bertalan <dani@danielbertalan.dev> 2021-12-19 01:15:12 +0100
committer: Brian Gianforcaro <b.gianfo@gmail.com> 2021-12-22 00:02:36 -0800
commit: 4fc28bfe02f11526d9b63affcda0d8dcafb725b2 (patch)
tree: 5bd7888478259f7c79ee14df452003524c15cb66 /Kernel/Memory
parent: 2f1b4b8a815d1b8d2b9bec34ce0bb916c80f41b3 (diff)
download: serenity-4fc28bfe02f11526d9b63affcda0d8dcafb725b2.zip
Kernel: Unmap Prekernel pages after they are no longer needed
The Prekernel's memory is only accessed until MemoryManager has been initialized. Keeping them around afterwards is both unnecessary and bad, as it prevents the userland from using the 0x100000-0x155000 virtual address range. Co-authored-by: Idan Horowitz <idan.horowitz@gmail.com>
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--  Kernel/Memory/MemoryManager.cpp  20
-rw-r--r--  Kernel/Memory/MemoryManager.h    15
-rw-r--r--  Kernel/Memory/Region.cpp          2
3 files changed, 29 insertions, 8 deletions
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 006038ea31..fb0a4ac410 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -106,6 +106,19 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
}
}
+UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
+{
+ SpinlockLocker page_lock(kernel_page_directory().get_lock());
+ SpinlockLocker mm_lock(s_mm_lock);
+
+ auto start = start_of_prekernel_image.page_base().get();
+ auto end = end_of_prekernel_image.page_base().get();
+
+ for (auto i = start; i <= end; i += PAGE_SIZE)
+ release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No, UnsafeIgnoreMissingPageTable::Yes);
+ flush_tlb(&kernel_page_directory(), VirtualAddress(start), (end - start) / PAGE_SIZE);
+}
+
UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
SpinlockLocker page_lock(kernel_page_directory().get_lock());
@@ -200,7 +213,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
if (multiboot_flags & 0x4) {
@@ -570,7 +582,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
-void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
+void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release, UnsafeIgnoreMissingPageTable unsafe_ignore_missing_page_table)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(s_mm_lock.is_locked_by_current_processor());
@@ -586,7 +598,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
auto& pte = page_table[page_table_index];
pte.clear();
- if (is_last_release || page_table_index == 0x1ff) {
+ if (is_last_pte_release == IsLastPTERelease::Yes || page_table_index == 0x1ff) {
// If this is the last PTE in a region or the last PTE in a page table then
// check if we can also release the page table
bool all_clear = true;
@@ -600,7 +612,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
pde.clear();
auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
- VERIFY(result);
+ VERIFY(unsafe_ignore_missing_page_table == UnsafeIgnoreMissingPageTable::Yes || result);
}
}
}
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 3b0b537aec..d4c32beb2f 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -49,19 +49,19 @@ inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
enum class UsedMemoryRangeType {
LowMemory = 0,
- Prekernel,
Kernel,
BootModule,
PhysicalPages,
+ __Count
};
static constexpr StringView UserMemoryRangeTypeNames[] {
"Low memory",
- "Prekernel",
"Kernel",
"Boot module",
"Physical Pages"
};
+static_assert(array_size(UserMemoryRangeTypeNames) == to_underlying(UsedMemoryRangeType::__Count));
struct UsedMemoryRange {
UsedMemoryRangeType type {};
@@ -159,6 +159,7 @@ public:
void set_page_writable_direct(VirtualAddress, bool);
void protect_readonly_after_init_memory();
+ void unmap_prekernel();
void unmap_text_after_init();
void protect_ksyms_after_init();
@@ -276,7 +277,15 @@ private:
PageTableEntry* pte(PageDirectory&, VirtualAddress);
PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
- void release_pte(PageDirectory&, VirtualAddress, bool);
+ enum class IsLastPTERelease {
+ Yes,
+ No
+ };
+ enum class UnsafeIgnoreMissingPageTable {
+ Yes,
+ No
+ };
+ void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease, UnsafeIgnoreMissingPageTable = UnsafeIgnoreMissingPageTable::No);
RefPtr<PageDirectory> m_kernel_page_directory;
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 8bb78447af..535a32d458 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -243,7 +243,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
size_t count = page_count();
for (size_t i = 0; i < count; ++i) {
auto vaddr = vaddr_from_page_index(i);
- MM.release_pte(*m_page_directory, vaddr, i == count - 1);
+ MM.release_pte(*m_page_directory, vaddr, i == count - 1 ? MemoryManager::IsLastPTERelease::Yes : MemoryManager::IsLastPTERelease::No);
}
MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {