author     Andreas Kling <kling@serenityos.org>   2021-02-19 18:21:54 +0100
committer  Andreas Kling <kling@serenityos.org>   2021-02-19 20:23:05 +0100
commit     6136faa4ebf6a878606f33bc03c5e62de9d5e662 (patch)
tree       c239f7caf485cf4692f5c9f5ffc8742d81971454
parent     da100f12a6a204091371c804922471935b736b63 (diff)
Kernel: Add .unmap_after_init section for code we don't need after init
You can now declare functions with UNMAP_AFTER_INIT and they'll get segregated into a separate kernel section that gets completely unmapped at the end of initialization.

This can be used for anything we don't need to call once we've booted into userspace.

There are two nice things about this mechanism:

- It allows us to free up entire pages of memory for other use. (Note that this patch does not actually make use of the freed pages yet, but in the future we totally could!)

- It allows us to get rid of obviously dangerous gadgets like write-to-CR0 and write-to-CR4, which are very useful for an attacker trying to disable SMAP/SMEP/etc.

I've also made sure to include a helpful panic message in case you hit a kernel crash because of this protection. :^)
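For illustration, this is how a function opts into the new mechanism (the function below is hypothetical and not part of this patch):

// Hypothetical init-only routine. UNMAP_AFTER_INIT places it in the
// .unmap_after_init section, so once MM.unmap_memory_after_init() has run,
// any call into it page-faults and triggers the new UNMAP_AFTER_INIT panic.
UNMAP_AFTER_INIT void initialize_example_boot_hardware()
{
    // One-time boot setup only; must never be called after init_stage2()
    // has finished unmapping this section.
}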
-rw-r--r--  Kernel/Arch/i386/CPU.cpp      7
-rw-r--r--  Kernel/Arch/i386/CPU.h        1
-rw-r--r--  Kernel/VM/MemoryManager.cpp  20
-rw-r--r--  Kernel/VM/MemoryManager.h     1
-rw-r--r--  Kernel/init.cpp               3
-rw-r--r--  Kernel/linker.ld              8
6 files changed, 40 insertions, 0 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index a5467e911d..5030e9d654 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -53,6 +53,8 @@
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/mallocdefs.h>
+extern FlatPtr start_of_unmap_after_init;
+extern FlatPtr end_of_unmap_after_init;
extern FlatPtr start_of_ro_after_init;
extern FlatPtr end_of_ro_after_init;
@@ -262,6 +264,11 @@ void page_fault_handler(TrapFrame* trap)
PANIC("Attempt to write into READONLY_AFTER_INIT section");
}
+ if (fault_address >= (FlatPtr)&start_of_unmap_after_init && fault_address < (FlatPtr)&end_of_unmap_after_init) {
+ dump(regs);
+ PANIC("Attempt to access UNMAP_AFTER_INIT section");
+ }
+
auto response = MM.handle_page_fault(PageFault(regs.exception_code, VirtualAddress(fault_address)));
if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory) {
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 3bfcf68f35..e2ea68b437 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -35,6 +35,7 @@
#include <LibC/sys/arch/i386/regs.h>
#define READONLY_AFTER_INIT __attribute__((section(".ro_after_init")))
+#define UNMAP_AFTER_INIT __attribute__((section(".unmap_after_init")))
#define PAGE_SIZE 4096
#define GENERIC_INTERRUPT_HANDLERS_COUNT (256 - IRQ_VECTOR_BASE)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 5d3b86931e..b780f82655 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -48,6 +48,8 @@ extern FlatPtr start_of_kernel_data;
extern FlatPtr end_of_kernel_bss;
extern FlatPtr start_of_ro_after_init;
extern FlatPtr end_of_ro_after_init;
+extern FlatPtr start_of_unmap_after_init;
+extern FlatPtr end_of_unmap_after_init;
extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
extern size_t multiboot_copy_boot_modules_count;
@@ -135,6 +137,24 @@ void MemoryManager::protect_readonly_after_init_memory()
}
}
+void MemoryManager::unmap_memory_after_init()
+{
+ ScopedSpinLock mm_lock(s_mm_lock);
+ ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+
+ auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
+ auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
+
+ // Unmap the entire .unmap_after_init section
+ for (auto i = start; i < end; i += PAGE_SIZE) {
+ auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
+ pte.clear();
+ flush_tlb(&kernel_page_directory(), VirtualAddress(i));
+ }
+
+ dmesgln("Unmapped {} KiB of kernel text after init! :^)", (end - start) / KiB);
+}
+
void MemoryManager::register_reserved_ranges()
{
ASSERT(!m_physical_memory_ranges.is_empty());
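For reference, the page rounding helpers used by unmap_memory_after_init() behave roughly like this, assuming 4 KiB pages; this is a sketch, and the actual kernel definitions may differ:

// Round an address down/up to a page boundary (PAGE_SIZE is 4096, see CPU.h).
inline FlatPtr page_round_down(FlatPtr x) { return x & ~(FlatPtr)(PAGE_SIZE - 1); }
inline FlatPtr page_round_up(FlatPtr x) { return (x + PAGE_SIZE - 1) & ~(FlatPtr)(PAGE_SIZE - 1); }

Rounding the start down and the end up means the loop unmaps every page touched by the section, and the ALIGN(4K) placement in linker.ld keeps other kernel text off those pages.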
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 4d307fee9e..5e9491e8be 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -142,6 +142,7 @@ public:
PageFaultResponse handle_page_fault(const PageFault&);
void protect_readonly_after_init_memory();
+ void unmap_memory_after_init();
static void enter_process_paging_scope(Process&);
static void enter_space(Space&);
diff --git a/Kernel/init.cpp b/Kernel/init.cpp
index 71dab40eda..002a0a6841 100644
--- a/Kernel/init.cpp
+++ b/Kernel/init.cpp
@@ -299,6 +299,9 @@ void init_stage2(void*)
// NOTE: Everything marked READONLY_AFTER_INIT becomes non-writable after this point.
MM.protect_readonly_after_init_memory();
+ // NOTE: Everything marked UNMAP_AFTER_INIT becomes inaccessible after this point.
+ MM.unmap_memory_after_init();
+
int error;
// FIXME: It would be nicer to set the mode from userspace.
diff --git a/Kernel/linker.ld b/Kernel/linker.ld
index e357c29e66..3f76fbefe8 100644
--- a/Kernel/linker.ld
+++ b/Kernel/linker.ld
@@ -20,6 +20,14 @@ SECTIONS
end_of_safemem_atomic_text = .;
*(.text*)
+ }
+
+ .unmap_after_init ALIGN(4K) : AT (ADDR(.unmap_after_init) - 0xc0000000)
+ {
+ start_of_unmap_after_init = .;
+ *(.unmap_after_init*);
+ end_of_unmap_after_init = .;
+
end_of_kernel_text = .;
}
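A note on the section boundary symbols: linker.ld only assigns addresses to start_of_unmap_after_init and end_of_unmap_after_init, so the C++ side declares them as extern objects and uses their addresses (not their values) as the bounds, hence the (FlatPtr)& casts in CPU.cpp and MemoryManager.cpp. A minimal sketch, with a hypothetical helper that is not part of this commit:

// Symbols defined by linker.ld; only their addresses are meaningful.
extern FlatPtr start_of_unmap_after_init;
extern FlatPtr end_of_unmap_after_init;

// Hypothetical helper: size in bytes of the section that gets unmapped.
static size_t unmap_after_init_section_size()
{
    return (FlatPtr)&end_of_unmap_after_init - (FlatPtr)&start_of_unmap_after_init;
}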