path: root/Kernel/VM/PageDirectory.cpp
author     Andreas Kling <awesomekling@gmail.com>  2019-12-25 11:22:16 +0100
committer  Andreas Kling <awesomekling@gmail.com>  2019-12-25 13:35:57 +0100
commit     52deb09382880b27ae3fca395cc9340e374f5142 (patch)
tree       1d8b5ada315f5ba736e93eaf104d38ecfd55745d /Kernel/VM/PageDirectory.cpp
parent     4883176fd8a7afb085cce9d893d016ff9d08b764 (diff)
Kernel: Enable PAE (Physical Address Extension)
Introduce one more (CPU) indirection layer in the paging code: the page
directory pointer table (PDPT). Each PageDirectory now has 4 separate
PageDirectoryEntry arrays, governing 1 GB of VM each.

A really neat side-effect of this is that we can now share the physical
page containing the >=3GB kernel-only address space metadata between all
processes, instead of lazily cloning it on page faults.

This will give us access to the NX (No eXecute) bit, allowing us to
prevent execution of memory that's not supposed to be executed.
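For orientation, here is a minimal sketch of the PAE layout this introduces. The struct names are illustrative assumptions, not code from this patch; only the 4-entry raw[] table and the notion of four 1 GB page directories correspond to what the diff below touches.

// Sketch only; names and exact layout are assumptions, not from this patch.
// With PAE, CR3 points at a 32-byte page directory pointer table (PDPT) of
// four 64-bit entries. Each entry holds the physical address of one page
// directory (512 64-bit PDEs), each directory governs 1 GB of virtual
// memory, and the four together cover the 4 GB 32-bit address space.
#include <cstdint>
using u64 = uint64_t;

struct PageDirectoryEntry {
    u64 raw { 0 }; // bit 0 = Present; bit 63 = NX once EFER.NXE is enabled
};

struct PageDirectoryPointerTable {
    u64 raw[4]; // PDPTE: page directory physical address | flags (bit 0 = Present)
};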
Diffstat (limited to 'Kernel/VM/PageDirectory.cpp')
-rw-r--r--  Kernel/VM/PageDirectory.cpp  41
1 file changed, 31 insertions(+), 10 deletions(-)
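In both constructors changed below, each table().raw[n] entry stores the physical address of one of the four page directories with bit 0 (the Present flag) set, which is what the `| 1` accomplishes. A hedged sketch of that composition; make_pdpte() is an illustrative helper that does not exist in the patch.

// Illustrative only. Page directories are page-aligned, so the low 12 bits
// of their physical address are zero and are free to carry flags.
#include <cstdint>
using u64 = uint64_t;

static u64 make_pdpte(u64 page_directory_paddr)
{
    return page_directory_paddr | 0x1; // bit 0: Present
}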
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index e25c6e9119..729cf4c9be 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -24,7 +24,17 @@ RefPtr<PageDirectory> PageDirectory::find_by_cr3(u32 cr3)
PageDirectory::PageDirectory(PhysicalAddress paddr)
: m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000)
{
- m_directory_page = PhysicalPage::create(paddr, true, false);
+ m_directory_table = PhysicalPage::create(paddr, true, false);
+ m_directory_pages[0] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 1), true, false);
+ m_directory_pages[1] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 2), true, false);
+ m_directory_pages[2] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 3), true, false);
+ m_directory_pages[3] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 4), true, false);
+
+ table().raw[0] = (u64)m_directory_pages[0]->paddr().as_ptr() | 1;
+ table().raw[1] = (u64)m_directory_pages[1]->paddr().as_ptr() | 1;
+ table().raw[2] = (u64)m_directory_pages[2]->paddr().as_ptr() | 1;
+ table().raw[3] = (u64)m_directory_pages[3]->paddr().as_ptr() | 1;
+
InterruptDisabler disabler;
cr3_map().set(cr3(), this);
}
@@ -33,7 +43,26 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
: m_process(&process)
, m_range_allocator(parent_range_allocator ? RangeAllocator(*parent_range_allocator) : RangeAllocator(VirtualAddress(userspace_range_base), kernelspace_range_base - userspace_range_base))
{
- MM.populate_page_directory(*this);
+ // Set up a userspace page directory
+
+ m_directory_table = MM.allocate_supervisor_physical_page();
+ m_directory_pages[0] = MM.allocate_supervisor_physical_page();
+ m_directory_pages[1] = MM.allocate_supervisor_physical_page();
+ m_directory_pages[2] = MM.allocate_supervisor_physical_page();
+ // Share the top 1 GB of kernel-only mappings (>=3GB or >=0xc0000000)
+ m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
+
+ table().raw[0] = (u64)m_directory_pages[0]->paddr().as_ptr() | 1;
+ table().raw[1] = (u64)m_directory_pages[1]->paddr().as_ptr() | 1;
+ table().raw[2] = (u64)m_directory_pages[2]->paddr().as_ptr() | 1;
+ table().raw[3] = (u64)m_directory_pages[3]->paddr().as_ptr() | 1;
+
+ // Clone bottom 8 MB of mappings from kernel_page_directory
+ table().directory(0)[0].copy_from({}, MM.kernel_page_directory().table().directory(0)[0]);
+ table().directory(0)[1].copy_from({}, MM.kernel_page_directory().table().directory(0)[1]);
+ table().directory(0)[2].copy_from({}, MM.kernel_page_directory().table().directory(0)[2]);
+ table().directory(0)[3].copy_from({}, MM.kernel_page_directory().table().directory(0)[3]);
+
InterruptDisabler disabler;
cr3_map().set(cr3(), this);
}
@@ -57,11 +86,3 @@ void PageDirectory::flush(VirtualAddress vaddr)
if (this == &MM.kernel_page_directory() || &current->process().page_directory() == this)
MM.flush_tlb(vaddr);
}
-
-void PageDirectory::update_kernel_mappings()
-{
- // This ensures that the kernel virtual address space is up-to-date in this page directory.
- // This may be necessary to avoid triple faulting when entering a process's paging scope
- // whose mappings are out-of-date.
- memcpy(entries() + 768, MM.kernel_page_directory().entries() + 768, sizeof(PageDirectoryEntry) * 256);
-}
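The removed update_kernel_mappings() copied the kernel half of the old page directory (entries 768 and up, i.e. >=0xc0000000) into a process's directory to keep it current. With m_directory_pages[3] now being a single physical page shared by every process, that synchronization is no longer needed. The NX bit mentioned in the commit message becomes reachable because PAE entries are 64 bits wide; a hedged sketch of how a mapping could later be marked non-executable (not part of this patch, and only effective once EFER.NXE is enabled):

// Illustrative only: with PAE and EFER.NXE enabled, bit 63 of a page
// table/directory entry marks the mapping non-executable.
#include <cstdint>
using u64 = uint64_t;

static u64 mark_non_executable(u64 entry)
{
    return entry | (1ull << 63); // bit 63: NX
}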