path: root/Kernel/VM/MemoryManager.h
author    Andreas Kling <awesomekling@gmail.com>    2019-12-25 11:22:16 +0100
committer Andreas Kling <awesomekling@gmail.com>    2019-12-25 13:35:57 +0100
commit    52deb09382880b27ae3fca395cc9340e374f5142 (patch)
tree      1d8b5ada315f5ba736e93eaf104d38ecfd55745d /Kernel/VM/MemoryManager.h
parent    4883176fd8a7afb085cce9d893d016ff9d08b764 (diff)
Kernel: Enable PAE (Physical Address Extension)
Introduce one more (CPU) indirection layer in the paging code: the page directory pointer table (PDPT). Each PageDirectory now has 4 separate PageDirectoryEntry arrays, governing 1 GB of VM each.

A really neat side-effect of this is that we can now share the physical page containing the >=3GB kernel-only address space metadata between all processes, instead of lazily cloning it on page faults.

This will give us access to the NX (No eXecute) bit, allowing us to prevent execution of memory that's not supposed to be executed.
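For readers unfamiliar with PAE, here is a minimal sketch of how a 32-bit virtual address is decomposed once PAE is enabled. This is not code from the SerenityOS tree; names such as PAEAddressParts and decompose are hypothetical. Bits 31:30 select one of the PDPT's 4 entries, which is why each PageDirectory now carries 4 PageDirectoryEntry arrays of 1 GB each:

#include <cstdint>
#include <cstdio>

struct PAEAddressParts {
    uint32_t pdpt_index; // bits 31:30 -> one of the 4 PageDirectoryEntry arrays (1 GiB each)
    uint32_t pde_index;  // bits 29:21 -> page directory entry (2 MiB granularity)
    uint32_t pte_index;  // bits 20:12 -> page table entry (4 KiB granularity)
    uint32_t offset;     // bits 11:0  -> byte offset within the page
};

static PAEAddressParts decompose(uint32_t vaddr)
{
    return {
        (vaddr >> 30) & 0x3,
        (vaddr >> 21) & 0x1ff,
        (vaddr >> 12) & 0x1ff,
        vaddr & 0xfff,
    };
}

// PAE widens every paging entry to 64 bits; bit 63 of an entry is the
// NX (No eXecute) bit the commit message refers to.
static constexpr uint64_t pae_nx_bit = 1ull << 63;

int main()
{
    // Example: any address >= 0xc0000000 (3 GB) yields pdpt_index == 3.
    auto parts = decompose(0xc0123456);
    printf("pdpt=%u pde=%u pte=%u offset=0x%x\n",
           parts.pdpt_index, parts.pde_index, parts.pte_index, parts.offset);
    return 0;
}

Since every address at or above 3 GB funnels through PDPT slot 3, pointing each process's fourth PDPT entry at the same physical page of kernel PageDirectoryEntries is enough to share the kernel-only address space metadata, as the commit message describes.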
Diffstat (limited to 'Kernel/VM/MemoryManager.h')
-rw-r--r--    Kernel/VM/MemoryManager.h    5

1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index ce0c52ff73..ef00bee9da 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -42,8 +42,6 @@ public:
 
     PageFaultResponse handle_page_fault(const PageFault&);
 
-    void populate_page_directory(PageDirectory&);
-
     void enter_process_paging_scope(Process&);
 
     bool validate_user_stack(const Process&, VirtualAddress) const;
@@ -114,8 +112,7 @@ private:
 
     PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
 
    RefPtr<PageDirectory> m_kernel_page_directory;
-    PageTableEntry* m_page_table_zero { nullptr };
-    PageTableEntry* m_page_table_one { nullptr };
+    PageTableEntry* m_low_page_tables[4] { nullptr };
 
     VirtualAddress m_quickmap_addr;
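A side note on the m_low_page_tables[4] change above, inferred from x86 paging geometry rather than stated by the commit: a classic page table holds 1024 4-byte entries and maps 4 MiB, while a PAE page table holds 512 8-byte entries and maps only 2 MiB, so covering the same low 8 MiB of memory now takes four tables instead of two. A sketch of that arithmetic:

#include <cstdio>

int main()
{
    constexpr unsigned long page_size = 4096;
    constexpr unsigned long non_pae_entries = 4096 / 4; // 1024 x 32-bit PTEs per table
    constexpr unsigned long pae_entries = 4096 / 8;     //  512 x 64-bit PTEs per table

    // Memory governed by one page table = entry count * 4 KiB.
    printf("non-PAE: one table maps %lu MiB; 2 tables map %lu MiB\n",
           non_pae_entries * page_size >> 20, 2 * non_pae_entries * page_size >> 20);
    printf("PAE:     one table maps %lu MiB; 4 tables map %lu MiB\n",
           pae_entries * page_size >> 20, 4 * pae_entries * page_size >> 20);
    return 0;
}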