author     Andreas Kling <awesomekling@gmail.com>  2019-11-04 00:05:57 +0100
committer  Andreas Kling <awesomekling@gmail.com>  2019-11-04 00:05:57 +0100
commit     9b2dc362292293394d212e0d7b0ac86579d077e9 (patch)
tree       c2edf4ca6ee55d5a2c32c6c76144c8250986f1bb /Kernel/VM/Region.cpp
parent     98b328754ec8bd29bd495c6567343e2422a33fa5 (diff)
Kernel: Merge MemoryManager::map_region_at_address() into Region::map()
Diffstat (limited to 'Kernel/VM/Region.cpp')
-rw-r--r--  Kernel/VM/Region.cpp  38
1 file changed, 32 insertions, 6 deletions
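
For orientation before the diff: this commit moves the page-table mapping loop out of MemoryManager and into Region itself, so call sites change shape as sketched below. The caller shown here is illustrative only and is not part of this diff.

    // Hypothetical call site, before this commit:
    //     MM.map_region_at_address(page_directory, region, region.vaddr());
    //
    // After this commit, the region maps itself into the page directory:
    //     region.map(page_directory);
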
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index c47d1c8eb4..682f28f55c 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -6,6 +6,8 @@
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>
+//#define MM_DEBUG
+
Region::Region(const Range& range, const String& name, u8 access)
: m_range(range)
, m_vmobject(AnonymousVMObject::create_with_size(size()))
@@ -190,10 +192,8 @@ void Region::remap_page(size_t index)
#ifdef MM_DEBUG
dbg() << "MM: >> region.remap_page (PD=" << page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
#endif
-
}
-
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
InterruptDisabler disabler;
@@ -207,8 +207,8 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
pte.set_user_allowed(false);
page_directory()->flush(vaddr);
#ifdef MM_DEBUG
- auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
- dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
+ auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
+ dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr.get(), physical_page ? physical_page->paddr().get() : 0);
#endif
}
if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
@@ -218,11 +218,37 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
void Region::map(PageDirectory& page_directory)
{
- MM.map_region_at_address(page_directory, *this, vaddr());
+ InterruptDisabler disabler;
+ set_page_directory(page_directory);
+#ifdef MM_DEBUG
+ dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", first_page_index(), last_page_index(), vmobject().page_count());
+#endif
+ for (size_t i = 0; i < page_count(); ++i) {
+ auto page_vaddr = vaddr().offset(i * PAGE_SIZE);
+ auto& pte = MM.ensure_pte(page_directory, page_vaddr);
+ auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
+ if (physical_page) {
+ pte.set_physical_page_base(physical_page->paddr().get());
+ pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
+ if (should_cow(i))
+ pte.set_writable(false);
+ else
+ pte.set_writable(is_writable());
+ } else {
+ pte.set_physical_page_base(0);
+ pte.set_present(false);
+ pte.set_writable(is_writable());
+ }
+ pte.set_user_allowed(is_user_accessible());
+ page_directory.flush(page_vaddr);
+#ifdef MM_DEBUG
+ dbgprintf("MM: >> map_region_at_address (PD=%p) '%s' V%p => P%p (@%p)\n", &page_directory, name().characters(), page_vaddr.get(), physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
+#endif
+ }
}
void Region::remap()
{
ASSERT(m_page_directory);
- MM.map_region_at_address(*m_page_directory, *this, vaddr());
+ map(*m_page_directory);
}
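
With remap() now delegating to map(), code that changes a region's access flags can push the new state into the page tables simply by remapping the region. A minimal usage sketch, assuming a set_writable() setter on Region (that setter is an assumption; it does not appear in this diff):

    // Hypothetical helper: drop write access on a region and rebuild its PTEs.
    static void make_region_read_only(Region& region)
    {
        region.set_writable(false); // assumed setter, not shown in this diff
        region.remap();             // re-runs Region::map() against m_page_directory
    }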