author     Andreas Kling <kling@serenityos.org>   2020-04-28 16:19:50 +0200
committer  Andreas Kling <kling@serenityos.org>   2020-04-28 17:05:14 +0200
commit     9c856811b2ad18b933fc1d9d5b8a771893fa7d9c (patch)
tree       264439a31fc3978ba85bae11d7d2d48bce5de03a /Kernel/VM
parent     8a417c311f3c3c13dc5353619051aac777c9f1df (diff)
Kernel: Add Region helpers for accessing underlying physical pages
Since a Region is basically a view into a potentially larger VMObject, it was always necessary to include the Region's starting offset when accessing its underlying physical pages.

Until now, you had to do that manually at every call site, but this patch adds a simple Region::physical_page() for read-only access, and Region::physical_page_slot() for when you want a mutable reference to the RefPtr<PhysicalPage> itself.

A lot of code is simplified by making use of these helpers.
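To make the pattern concrete, here is a minimal standalone sketch of the "Region is a view into a VMObject" idea that these helpers encapsulate. It uses std::shared_ptr as a stand-in for the kernel's RefPtr, and stripped-down Region/VMObject types, so it is an illustration of the technique, not the real kernel code:

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

struct PhysicalPage {
};

struct VMObject {
    // One slot per page; a null slot means the page is not committed yet.
    std::vector<std::shared_ptr<PhysicalPage>> physical_pages;
};

class Region {
public:
    Region(VMObject& vmobject, size_t first_page_index, size_t page_count)
        : m_vmobject(vmobject)
        , m_first_page_index(first_page_index)
        , m_page_count(page_count)
    {
    }

    // Read-only access; the Region's starting offset into the VMObject
    // is applied exactly once, here, instead of at every call site.
    const PhysicalPage* physical_page(size_t index) const
    {
        assert(index < m_page_count);
        return m_vmobject.physical_pages[m_first_page_index + index].get();
    }

    // Mutable access to the slot itself, so callers can install or
    // replace a page (as the zero-fault and COW-fault handlers do).
    std::shared_ptr<PhysicalPage>& physical_page_slot(size_t index)
    {
        assert(index < m_page_count);
        return m_vmobject.physical_pages[m_first_page_index + index];
    }

private:
    VMObject& m_vmobject;
    size_t m_first_page_index { 0 };
    size_t m_page_count { 0 };
};

int main()
{
    VMObject vmo;
    vmo.physical_pages.resize(8);

    // A Region viewing pages [4, 6) of the VMObject.
    Region region(vmo, 4, 2);
    assert(region.physical_page(0) == nullptr);

    // Installing a page through the slot, the way handle_zero_fault() does.
    region.physical_page_slot(0) = std::make_shared<PhysicalPage>();
    assert(region.physical_page(0) != nullptr);
    assert(vmo.physical_pages[4] != nullptr); // the offset was applied once, centrally
    return 0;
}

Note the deliberate asymmetry in the actual commit: the read path hands out a raw const PhysicalPage* (no reference-count churn), while physical_page_slot() exposes the RefPtr slot itself so the fault handlers can move a freshly allocated page into it.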
Diffstat (limited to 'Kernel/VM')
-rw-r--r--  Kernel/VM/Region.cpp  56
-rw-r--r--  Kernel/VM/Region.h    12
2 files changed, 40 insertions(+), 28 deletions(-)
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 79104439a8..d092239140 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -131,7 +131,7 @@ bool Region::commit(size_t page_index)
{
ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
InterruptDisabler disabler;
- auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index];
+ auto& vmobject_physical_page_entry = physical_page_slot(page_index);
if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
return true;
auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
@@ -165,8 +165,8 @@ size_t Region::amount_resident() const
{
size_t bytes = 0;
for (size_t i = 0; i < page_count(); ++i) {
- auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
- if (physical_page && !physical_page->is_shared_zero_page())
+ auto* page = physical_page(i);
+ if (page && !page->is_shared_zero_page())
bytes += PAGE_SIZE;
}
return bytes;
@@ -176,8 +176,8 @@ size_t Region::amount_shared() const
{
size_t bytes = 0;
for (size_t i = 0; i < page_count(); ++i) {
- auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
- if (physical_page && physical_page->ref_count() > 1 && !physical_page->is_shared_zero_page())
+ auto* page = physical_page(i);
+ if (page && page->ref_count() > 1 && !page->is_shared_zero_page())
bytes += PAGE_SIZE;
}
return bytes;
@@ -199,8 +199,8 @@ NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefP
bool Region::should_cow(size_t page_index) const
{
- auto& slot = vmobject().physical_pages()[page_index];
- if (slot && slot->is_shared_zero_page())
+ auto* page = physical_page(page_index);
+ if (page && page->is_shared_zero_page())
return true;
if (m_shared)
return false;
@@ -224,12 +224,12 @@ void Region::map_individual_page_impl(size_t page_index)
{
auto page_vaddr = vaddr().offset(page_index * PAGE_SIZE);
auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
- auto& physical_page = vmobject().physical_pages()[first_page_index() + page_index];
- if (!physical_page || (!is_readable() && !is_writable())) {
+ auto* page = physical_page(page_index);
+ if (!page || (!is_readable() && !is_writable())) {
pte.clear();
} else {
pte.set_cache_disabled(!m_cacheable);
- pte.set_physical_page_base(physical_page->paddr().get());
+ pte.set_physical_page_base(page->paddr().get());
pte.set_present(true);
if (should_cow(page_index))
pte.set_writable(false);
@@ -239,7 +239,7 @@ void Region::map_individual_page_impl(size_t page_index)
pte.set_execute_disabled(!is_executable());
pte.set_user_allowed(is_user_accessible());
#ifdef MM_DEBUG
- dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
+ dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
#endif
}
MM.flush_tlb(page_vaddr);
@@ -249,7 +249,7 @@ void Region::remap_page(size_t page_index)
{
ASSERT(m_page_directory);
InterruptDisabler disabler;
- ASSERT(vmobject().physical_pages()[first_page_index() + page_index]);
+ ASSERT(physical_page(page_index));
map_individual_page_impl(page_index);
}
@@ -263,8 +263,8 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
pte.clear();
MM.flush_tlb(vaddr);
#ifdef MM_DEBUG
- auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
- dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", physical_page ? physical_page->paddr().get() : 0) << " <<";
+ auto* page = physical_page(i);
+ dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
#endif
}
if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
@@ -315,7 +315,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
}
#ifdef MAP_SHARED_ZERO_PAGE_LAZILY
if (fault.is_read()) {
- vmobject().physical_pages()[first_page_index() + page_index_in_region] = MM.shared_zero_page();
+ physical_page_slot(page_index_in_region) = MM.shared_zero_page();
remap_page(page_index_in_region);
return PageFaultResponse::Continue;
}
@@ -330,7 +330,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
#ifdef PAGE_FAULT_DEBUG
dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
- if (vmobject().physical_pages()[first_page_index() + page_index_in_region]->is_shared_zero_page()) {
+ if (physical_page(page_index_in_region)->is_shared_zero_page()) {
#ifdef PAGE_FAULT_DEBUG
dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
@@ -351,9 +351,9 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
LOCKER(vmobject().m_paging_lock);
cli();
- auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
- if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page()) {
+ auto& page_slot = physical_page_slot(page_index_in_region);
+ if (!page_slot.is_null() && !page_slot->is_shared_zero_page()) {
#ifdef PAGE_FAULT_DEBUG
dbg() << "MM: zero_page() but page already present. Fine with me!";
#endif
@@ -364,8 +364,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
if (Thread::current)
Thread::current->did_zero_fault();
- auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
- if (physical_page.is_null()) {
+ auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+ if (page.is_null()) {
klog() << "MM: handle_zero_fault was unable to allocate a physical page";
return PageFaultResponse::ShouldCrash;
}
@@ -373,7 +373,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
#ifdef PAGE_FAULT_DEBUG
dbg() << " >> ZERO " << physical_page->paddr();
#endif
- vmobject_physical_page_entry = move(physical_page);
+ page_slot = move(page);
remap_page(page_index_in_region);
return PageFaultResponse::Continue;
}
@@ -381,8 +381,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
ASSERT_INTERRUPTS_DISABLED();
- auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
- if (vmobject_physical_page_entry->ref_count() == 1) {
+ auto& page_slot = physical_page_slot(page_index_in_region);
+ if (page_slot->ref_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
dbg() << " >> It's a COW page but nobody is sharing it anymore. Remap r/w";
#endif
@@ -397,19 +397,19 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
#ifdef PAGE_FAULT_DEBUG
dbg() << " >> It's a COW page and it's time to COW!";
#endif
- auto physical_page_to_copy = move(vmobject_physical_page_entry);
- auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
- if (physical_page.is_null()) {
+ auto physical_page_to_copy = move(page_slot);
+ auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+ if (page.is_null()) {
klog() << "MM: handle_cow_fault was unable to allocate a physical page";
return PageFaultResponse::ShouldCrash;
}
- u8* dest_ptr = MM.quickmap_page(*physical_page);
+ u8* dest_ptr = MM.quickmap_page(*page);
const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
dbg() << " >> COW " << physical_page->paddr() << " <- " << physical_page_to_copy->paddr();
#endif
copy_from_user(dest_ptr, src_ptr, PAGE_SIZE);
- vmobject_physical_page_entry = move(physical_page);
+ page_slot = move(page);
MM.unquickmap_page();
set_should_cow(page_index_in_region, false);
remap_page(page_index_in_region);
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index b71c8986e9..1d95ea8844 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -128,6 +128,18 @@ public:
return size() / PAGE_SIZE;
}
+ const PhysicalPage* physical_page(size_t index) const
+ {
+ ASSERT(index < page_count());
+ return vmobject().physical_pages()[first_page_index() + index];
+ }
+
+ RefPtr<PhysicalPage>& physical_page_slot(size_t index)
+ {
+ ASSERT(index < page_count());
+ return vmobject().physical_pages()[first_page_index() + index];
+ }
+
size_t offset_in_vmobject() const
{
return m_offset_in_vmobject;