Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Process.cpp          | 12 ++++++------
-rw-r--r--  Kernel/VM/MemoryManager.cpp | 29 -----------------------------
-rw-r--r--  Kernel/VM/MemoryManager.h   |  3 ---
-rw-r--r--  Kernel/VM/Region.cpp        | 30 +++++++++++++++++++++++++++++-
-rw-r--r--  Kernel/VM/Region.h          |  7 +++++++
5 files changed, 42 insertions(+), 39 deletions(-)
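
In short, this commit moves region mapping and unmapping out of MemoryManager and into Region itself. A sketch of the API change, condensed from the hunks below (illustrative, not part of the diff):

    // Before: the MemoryManager singleton owns both operations.
    bool MemoryManager::map_region(Process&, Region&);
    bool MemoryManager::unmap_region(Region&, bool deallocate_range = true);

    // After: Region owns them, and a named enum replaces the bare bool.
    // (Both old functions unconditionally returned true, so nothing is
    // lost by the void return.)
    void Region::map(Process&);
    void Region::unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);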
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 98107a2b2f..383f7e1f96 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -122,7 +122,7 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
if (!range.is_valid())
return nullptr;
m_regions.append(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
- MM.map_region(*this, m_regions.last());
+ m_regions.last().map(*this);
if (commit)
m_regions.last().commit();
return &m_regions.last();
@@ -134,7 +134,7 @@ Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size,
if (!range.is_valid())
return nullptr;
m_regions.append(Region::create_user_accessible(range, inode, name, prot_to_region_access_flags(prot)));
- MM.map_region(*this, m_regions.last());
+ m_regions.last().map(*this);
return &m_regions.last();
}
@@ -145,7 +145,7 @@ Region* Process::allocate_region_with_vmo(VirtualAddress vaddr, size_t size, Non
return nullptr;
offset_in_vmo &= PAGE_MASK;
m_regions.append(Region::create_user_accessible(range, move(vmo), offset_in_vmo, name, prot_to_region_access_flags(prot)));
- MM.map_region(*this, m_regions.last());
+ m_regions.last().map(*this);
return &m_regions.last();
}
@@ -259,7 +259,7 @@ int Process::sys$munmap(void* addr, size_t size)
}
// We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
- MM.unmap_region(*old_region, false);
+ old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
deallocate_region(*old_region);
// Instead we give back the unwanted VM manually.
@@ -267,7 +267,7 @@ int Process::sys$munmap(void* addr, size_t size)
// And finally we map the new region(s).
for (auto* new_region : new_regions) {
- MM.map_region(*this, *new_region);
+ new_region->map(*this);
}
return 0;
}
@@ -313,7 +313,7 @@ Process* Process::fork(RegisterDump& regs)
dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
#endif
child->m_regions.append(region.clone());
- MM.map_region(*child, child->m_regions.last());
+ child->m_regions.last().map(*child);
if (&region == m_master_tls_region)
child->m_master_tls_region = &child->m_regions.last();
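
The Process.cpp hunks above are mechanical call-site rewrites. A minimal sketch of the two patterns, using only names that appear in this diff:

    // Mapping: the region now maps itself into the process's address space.
    //   old: MM.map_region(*this, m_regions.last());
    m_regions.last().map(*this);

    // Unmapping without releasing the virtual range, as in sys$munmap,
    // where the unwanted range is handed back to the allocator manually:
    //   old: MM.unmap_region(*old_region, false);
    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);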
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 4a418263bb..e56d0e97f7 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -713,35 +713,6 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
}
}
-bool MemoryManager::unmap_region(Region& region, bool deallocate_range)
-{
- ASSERT(region.page_directory());
- InterruptDisabler disabler;
- for (size_t i = 0; i < region.page_count(); ++i) {
- auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
- auto& pte = ensure_pte(*region.page_directory(), vaddr);
- pte.set_physical_page_base(0);
- pte.set_present(false);
- pte.set_writable(false);
- pte.set_user_allowed(false);
- region.page_directory()->flush(vaddr);
-#ifdef MM_DEBUG
- auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
- dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
-#endif
- }
- if (deallocate_range)
- region.page_directory()->range_allocator().deallocate(region.range());
- region.release_page_directory();
- return true;
-}
-
-bool MemoryManager::map_region(Process& process, Region& region)
-{
- map_region_at_address(process.page_directory(), region, region.vaddr());
- return true;
-}
-
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const
{
auto* region = region_from_vaddr(process, vaddr);
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 5c6b53bd03..7a77933127 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -47,9 +47,6 @@ public:
PageFaultResponse handle_page_fault(const PageFault&);
- bool map_region(Process&, Region&);
- bool unmap_region(Region&, bool deallocate_range = true);
-
void populate_page_directory(PageDirectory&);
void enter_process_paging_scope(Process&);
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index d7e2e4ce61..7d79181147 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -41,7 +41,7 @@ Region::~Region()
// find the address<->region mappings in an invalid state there.
InterruptDisabler disabler;
if (m_page_directory) {
- MM.unmap_region(*this);
+ unmap(ShouldDeallocateVirtualMemoryRange::Yes);
ASSERT(!m_page_directory);
}
MM.unregister_region(*this);
@@ -192,3 +192,31 @@ void Region::remap_page(size_t index)
#endif
}
+
+
+void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
+{
+ InterruptDisabler disabler;
+ ASSERT(page_directory());
+ for (size_t i = 0; i < page_count(); ++i) {
+ auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
+ auto& pte = MM.ensure_pte(*page_directory(), vaddr);
+ pte.set_physical_page_base(0);
+ pte.set_present(false);
+ pte.set_writable(false);
+ pte.set_user_allowed(false);
+ page_directory()->flush(vaddr);
+#ifdef MM_DEBUG
+ auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
+ dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
+#endif
+ }
+ if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
+ page_directory()->range_allocator().deallocate(range());
+ release_page_directory();
+}
+
+void Region::map(Process& process)
+{
+ MM.map_region_at_address(process.page_directory(), *this, vaddr());
+}
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index f7244b1e9c..6c90415bda 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -114,6 +114,13 @@ public:
m_access &= ~Access::Write;
}
+ void map(Process&);
+ enum class ShouldDeallocateVirtualMemoryRange {
+ No,
+ Yes,
+ };
+ void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
+
void remap_page(size_t index);
// For InlineLinkedListNode
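
A side note on the ShouldDeallocateVirtualMemoryRange declaration above: the commit replaces a bare bool parameter with a scoped enum so that call sites read as intent instead of a mystery true/false. A standalone sketch of the idiom with hypothetical names (Resource and ShouldReleaseStorage are illustrative, not from this commit):

    #include <cstdio>

    class Resource {
    public:
        // A scoped enum instead of "bool release_storage" makes call sites
        // self-documenting and blocks accidental int-to-bool conversions.
        enum class ShouldReleaseStorage {
            No,
            Yes,
        };

        void teardown(ShouldReleaseStorage release = ShouldReleaseStorage::Yes)
        {
            std::puts("unmapping pages");
            if (release == ShouldReleaseStorage::Yes)
                std::puts("returning storage to the allocator");
        }
    };

    int main()
    {
        Resource r;
        r.teardown();                                   // common path: release everything
        r.teardown(Resource::ShouldReleaseStorage::No); // caller keeps the storage
    }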