author     Andreas Kling <kling@serenityos.org>    2021-02-19 18:21:54 +0100
committer  Andreas Kling <kling@serenityos.org>    2021-02-19 20:23:05 +0100
commit     6136faa4ebf6a878606f33bc03c5e62de9d5e662 (patch)
tree       c239f7caf485cf4692f5c9f5ffc8742d81971454 /Kernel/VM
parent     da100f12a6a204091371c804922471935b736b63 (diff)
Kernel: Add .unmap_after_init section for code we don't need after init
You can now declare functions with UNMAP_AFTER_INIT and they'll get
segregated into a separate kernel section that gets completely
unmapped at the end of initialization.

This can be used for anything we don't need to call once we've booted
into userspace.

There are two nice things about this mechanism:

- It allows us to free up entire pages of memory for other use.
  (Note that this patch does not actually make use of the freed pages
  yet, but in the future we totally could!)

- It allows us to get rid of obviously dangerous gadgets like
  write-to-CR0 and write-to-CR4, which are very useful for an attacker
  trying to disable SMAP/SMEP/etc.

I've also made sure to include a helpful panic message in case you
hit a kernel crash because of this protection. :^)
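For context, here is a minimal sketch of how the attribute macro and a
typical use might look. The macro definition itself is not part of this
diff, so the section-attribute spelling and the example function below
are assumptions:

// Hypothetical definition (assumed, not part of this diff): place the
// function in the .unmap_after_init linker section using a GCC/Clang
// section attribute.
#define UNMAP_AFTER_INIT __attribute__((section(".unmap_after_init")))

// Example use: a one-shot setup routine that is never needed again once
// the kernel has booted into userspace, so its pages can be unmapped.
UNMAP_AFTER_INIT void setup_boot_time_tables()
{
    // ... one-time initialization work ...
}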
Diffstat (limited to 'Kernel/VM')
-rw-r--r--  Kernel/VM/MemoryManager.cpp  20
-rw-r--r--  Kernel/VM/MemoryManager.h     1
2 files changed, 21 insertions(+), 0 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 5d3b86931e..b780f82655 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -48,6 +48,8 @@ extern FlatPtr start_of_kernel_data;
 extern FlatPtr end_of_kernel_bss;
 extern FlatPtr start_of_ro_after_init;
 extern FlatPtr end_of_ro_after_init;
+extern FlatPtr start_of_unmap_after_init;
+extern FlatPtr end_of_unmap_after_init;
 extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
 extern size_t multiboot_copy_boot_modules_count;
@@ -135,6 +137,24 @@ void MemoryManager::protect_readonly_after_init_memory()
     }
 }
+void MemoryManager::unmap_memory_after_init()
+{
+    ScopedSpinLock mm_lock(s_mm_lock);
+    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+
+    auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
+    auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
+
+    // Unmap the entire .unmap_after_init section
+    for (auto i = start; i < end; i += PAGE_SIZE) {
+        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
+        pte.clear();
+        flush_tlb(&kernel_page_directory(), VirtualAddress(i));
+    }
+
+    dmesgln("Unmapped {} KiB of kernel text after init! :^)", (end - start) / KiB);
+}
+
 void MemoryManager::register_reserved_ranges()
 {
     ASSERT(!m_physical_memory_ranges.is_empty());
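The page_round_down / page_round_up calls above snap the section
boundaries to page granularity so that only whole pages are unmapped.
A self-contained sketch of that arithmetic, assuming 4 KiB pages (the
standalone definitions here are illustrative restatements, not the
kernel's own):

#include <cstdint>
#include <cstdio>

using FlatPtr = uintptr_t;
static constexpr FlatPtr PAGE_SIZE = 4096; // assumed 4 KiB pages

// Illustrative restatements of the rounding helpers used in the diff.
constexpr FlatPtr page_round_down(FlatPtr x) { return x & ~(PAGE_SIZE - 1); }
constexpr FlatPtr page_round_up(FlatPtr x) { return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }

int main()
{
    auto start = page_round_down(0xc0123456); // -> 0xc0123000
    auto end = page_round_up(0xc0125001);     // -> 0xc0126000
    printf("would unmap %zu KiB\n", (size_t)((end - start) / 1024));
}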
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 4d307fee9e..5e9491e8be 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -142,6 +142,7 @@ public:
     PageFaultResponse handle_page_fault(const PageFault&);
     void protect_readonly_after_init_memory();
+    void unmap_memory_after_init();
     static void enter_process_paging_scope(Process&);
     static void enter_space(Space&);
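With the declaration in place, kernel initialization can call the new
method once all boot-only code has run. A hedged sketch of what such a
call site might look like; the real call site is outside this Kernel/VM
diff, and the singleton accessor is an assumption based on the
surrounding codebase:

// Hypothetical end-of-init sequence: lock down .ro_after_init first,
// then drop the .unmap_after_init pages entirely. The accessor name
// MemoryManager::the() is assumed here.
MemoryManager::the().protect_readonly_after_init_memory();
MemoryManager::the().unmap_memory_after_init();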