author     Andreas Kling <awesomekling@gmail.com>    2019-11-04 00:26:00 +0100
committer  Andreas Kling <awesomekling@gmail.com>    2019-11-04 00:26:00 +0100
commit     0e8f1d7cb64bbe198d44b188f8684af8cd561d4f (patch)
tree       9fae1b111143efa9ceb3cf8e27a1edeb705ccfcb
parent     6ed9cc47174728cfdc5134bd41744e8ecf8eacdc (diff)
Kernel: Don't expose a region's page directory to the outside world
Now that Region manages its own mapping/unmapping, there's no need for the outside world to be able to grab at its page directory.
-rw-r--r--  Kernel/VM/MemoryManager.cpp |  1
-rw-r--r--  Kernel/VM/Region.cpp        | 16
-rw-r--r--  Kernel/VM/Region.h          |  2
3 files changed, 8 insertions, 11 deletions
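
To make the shape of the change concrete, here is a rough sketch of the relevant part of the Region class after this commit. It is an approximation assembled from the hunks below; the exact declarations (the RefPtr member type, the nested enum, default arguments, surrounding members) are assumptions, not a verbatim copy of Kernel/VM/Region.h:

    class Region {
    public:
        enum class ShouldDeallocateVirtualMemoryRange { No, Yes };

        // Mapping and unmapping are handled by the Region itself; callers
        // no longer see the page directory at all.
        void remap_page(size_t index);
        void unmap(ShouldDeallocateVirtualMemoryRange);

        bool should_cow(size_t page_index) const;
        void set_should_cow(size_t page_index, bool);

    private:
        // Formerly exposed through a public page_directory() accessor;
        // after this commit it is a private implementation detail.
        RefPtr<PageDirectory> m_page_directory;
    };
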
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index e007c29b2d..00b94a6f68 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -365,7 +365,6 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)

 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
-    ASSERT(region.page_directory());
     ASSERT(region.vmobject().is_inode());

     auto& vmobject = region.vmobject();
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 2e05dba93b..45e9ff5c64 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -175,10 +175,10 @@ Bitmap& Region::ensure_cow_map() const

 void Region::remap_page(size_t index)
 {
-    ASSERT(page_directory());
+    ASSERT(m_page_directory);
     InterruptDisabler disabler;
     auto page_vaddr = vaddr().offset(index * PAGE_SIZE);
-    auto& pte = MM.ensure_pte(*page_directory(), page_vaddr);
+    auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     auto& physical_page = vmobject().physical_pages()[first_page_index() + index];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
@@ -188,31 +188,31 @@ void Region::remap_page(size_t index)
     else
         pte.set_writable(is_writable());
     pte.set_user_allowed(is_user_accessible());
-    page_directory()->flush(page_vaddr);
+    m_page_directory->flush(page_vaddr);
 #ifdef MM_DEBUG
-    dbg() << "MM: >> region.remap_page (PD=" << page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
+    dbg() << "MM: >> region.remap_page (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
 #endif
 }

 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     InterruptDisabler disabler;
-    ASSERT(page_directory());
+    ASSERT(m_page_directory);
     for (size_t i = 0; i < page_count(); ++i) {
         auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
-        auto& pte = MM.ensure_pte(*page_directory(), vaddr);
+        auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
         pte.set_physical_page_base(0);
         pte.set_present(false);
         pte.set_writable(false);
         pte.set_user_allowed(false);
-        page_directory()->flush(vaddr);
+        m_page_directory->flush(vaddr);
 #ifdef MM_DEBUG
         auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
         dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr.get(), physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
     if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
-        page_directory()->range_allocator().deallocate(range());
+        m_page_directory->range_allocator().deallocate(range());
     m_page_directory = nullptr;
 }
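
With the accessor gone, everything outside Region goes through Region's own interface instead of flushing or deallocating via the page directory directly. A hypothetical caller-side sketch (the helper names are illustrative, not part of this commit; it assumes the nested enum from the sketch above):

    // Tearing down a mapping: ask the Region to unmap itself. It walks its
    // own PTEs, optionally returns the virtual range to the range
    // allocator, and then drops its reference to the page directory.
    void release_region(Region& region)
    {
        region.unmap(Region::ShouldDeallocateVirtualMemoryRange::Yes);
    }

    // Refreshing a single page (e.g. after a CoW or protection change)
    // likewise goes through the Region:
    void refresh_page(Region& region, size_t page_index)
    {
        region.remap_page(page_index);
    }
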
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 5a3b7ca4ad..2e7a26da57 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -89,8 +89,6 @@ public:
     size_t amount_resident() const;
     size_t amount_shared() const;

-    PageDirectory* page_directory() { return m_page_directory.ptr(); }
-
     bool should_cow(size_t page_index) const;
     void set_should_cow(size_t page_index, bool);