summary | refs | log | tree | commit | diff
path: root/Kernel
diff options
context:
space:
mode:
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Memory/MemoryManager.cpp  20
-rw-r--r--  Kernel/Memory/MemoryManager.h    15
-rw-r--r--  Kernel/Memory/Region.cpp          2
-rw-r--r--  Kernel/init.cpp                   2
4 files changed, 31 insertions(+), 8 deletions(-)
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 006038ea31..fb0a4ac410 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -106,6 +106,19 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
}
}
+UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
+{
+ SpinlockLocker page_lock(kernel_page_directory().get_lock());
+ SpinlockLocker mm_lock(s_mm_lock);
+
+ auto start = start_of_prekernel_image.page_base().get();
+ auto end = end_of_prekernel_image.page_base().get();
+
+ for (auto i = start; i <= end; i += PAGE_SIZE)
+ release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No, UnsafeIgnoreMissingPageTable::Yes);
+ flush_tlb(&kernel_page_directory(), VirtualAddress(start), (end - start) / PAGE_SIZE);
+}
+
UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
SpinlockLocker page_lock(kernel_page_directory().get_lock());
@@ -200,7 +213,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
if (multiboot_flags & 0x4) {
@@ -570,7 +582,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
-void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
+void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release, UnsafeIgnoreMissingPageTable unsafe_ignore_missing_page_table)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(s_mm_lock.is_locked_by_current_processor());
@@ -586,7 +598,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
auto& pte = page_table[page_table_index];
pte.clear();
- if (is_last_release || page_table_index == 0x1ff) {
+ if (is_last_pte_release == IsLastPTERelease::Yes || page_table_index == 0x1ff) {
// If this is the last PTE in a region or the last PTE in a page table then
// check if we can also release the page table
bool all_clear = true;
@@ -600,7 +612,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
pde.clear();
auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
- VERIFY(result);
+ VERIFY(unsafe_ignore_missing_page_table == UnsafeIgnoreMissingPageTable::Yes || result);
}
}
}
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 3b0b537aec..d4c32beb2f 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -49,19 +49,19 @@ inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
enum class UsedMemoryRangeType {
LowMemory = 0,
- Prekernel,
Kernel,
BootModule,
PhysicalPages,
+ __Count
};
static constexpr StringView UserMemoryRangeTypeNames[] {
"Low memory",
- "Prekernel",
"Kernel",
"Boot module",
"Physical Pages"
};
+static_assert(array_size(UserMemoryRangeTypeNames) == to_underlying(UsedMemoryRangeType::__Count));
struct UsedMemoryRange {
UsedMemoryRangeType type {};
@@ -159,6 +159,7 @@ public:
void set_page_writable_direct(VirtualAddress, bool);
void protect_readonly_after_init_memory();
+ void unmap_prekernel();
void unmap_text_after_init();
void protect_ksyms_after_init();
@@ -276,7 +277,15 @@ private:
PageTableEntry* pte(PageDirectory&, VirtualAddress);
PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
- void release_pte(PageDirectory&, VirtualAddress, bool);
+ enum class IsLastPTERelease {
+ Yes,
+ No
+ };
+ enum class UnsafeIgnoreMissingPageTable {
+ Yes,
+ No
+ };
+ void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease, UnsafeIgnoreMissingPageTable = UnsafeIgnoreMissingPageTable::No);
RefPtr<PageDirectory> m_kernel_page_directory;
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 8bb78447af..535a32d458 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -243,7 +243,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
size_t count = page_count();
for (size_t i = 0; i < count; ++i) {
auto vaddr = vaddr_from_page_index(i);
- MM.release_pte(*m_page_directory, vaddr, i == count - 1);
+ MM.release_pte(*m_page_directory, vaddr, i == count - 1 ? MemoryManager::IsLastPTERelease::Yes : MemoryManager::IsLastPTERelease::No);
}
MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {
diff --git a/Kernel/init.cpp b/Kernel/init.cpp
index fee2ec1af9..676d970a70 100644
--- a/Kernel/init.cpp
+++ b/Kernel/init.cpp
@@ -193,6 +193,8 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
CommandLine::initialize();
Memory::MemoryManager::initialize(0);
+ MM.unmap_prekernel();
+
// Ensure that the safemem sections are not empty. This could happen if the linker accidentally discards the sections.
VERIFY(+start_of_safemem_text != +end_of_safemem_text);
VERIFY(+start_of_safemem_atomic_text != +end_of_safemem_atomic_text);