author    Andreas Kling <kling@serenityos.org>    2022-08-19 12:51:52 +0200
committer Andreas Kling <kling@serenityos.org>    2022-08-19 12:52:48 +0200
commit    5ada38f9c3c525938ef9032b4d551d050661e2e9 (patch)
tree      f59b661cbeef6e7e574d44a2b708efaa3bb3e343
parent    a84d893af862a40bd2c093a4b1d0eb6e7f1640da (diff)
Kernel: Reduce time under VMObject lock while handling zero faults
We only need to hold the VMObject lock while inspecting and/or updating the physical page array in the VMObject.
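The fix follows a common pattern for shortening critical sections: do the expensive work (allocating and zeroing a page) with no lock held, then take the lock only long enough to inspect the slot and publish the result, adopting a competitor's page if another thread raced in first. Below is a minimal userspace sketch of the same idea, with std::mutex and std::shared_ptr standing in for the kernel's Spinlock and RefPtr<PhysicalPage>; PageArray, Page, and ensure_page are illustrative names, not SerenityOS API:

#include <memory>
#include <mutex>
#include <vector>

struct Page { /* stand-in for a physical page */ };

class PageArray {
public:
    explicit PageArray(size_t count)
        : m_slots(count)
    {
    }

    // Fill the slot at `index`, allocating outside the lock.
    std::shared_ptr<Page> ensure_page(size_t index)
    {
        // 1. Expensive part first, with no lock held.
        auto new_page = std::make_shared<Page>();

        // 2. Lock only to inspect and publish the slot.
        std::lock_guard lock(m_lock);
        auto& slot = m_slots[index];
        if (slot) {
            // Lost the race: another thread already filled the slot.
            // Drop our page (freed when new_page goes out of scope)
            // and use theirs instead.
            return slot;
        }
        slot = std::move(new_page);
        return slot;
    }

private:
    std::mutex m_lock;
    std::vector<std::shared_ptr<Page>> m_slots;
};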
-rw-r--r--  Kernel/Memory/Region.cpp  45
-rw-r--r--  Kernel/Memory/Region.h     2
2 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index c69004b451..d82aebdc6d 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -392,7 +392,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
         auto phys_page = physical_page(page_index_in_region);
         if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
             dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
-            return handle_zero_fault(page_index_in_region);
+            return handle_zero_fault(page_index_in_region, *phys_page);
         }
         return handle_cow_fault(page_index_in_region);
     }
@@ -400,42 +400,49 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
     return PageFaultResponse::ShouldCrash;
 }
 
-PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
+PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, PhysicalPage& page_in_slot_at_time_of_fault)
 {
     VERIFY(vmobject().is_anonymous());
-    SpinlockLocker locker(vmobject().m_lock);
-
-    auto& page_slot = physical_page_slot(page_index_in_region);
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
-    if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
-        dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
-        if (!remap_vmobject_page(page_index_in_vmobject, *page_slot))
-            return PageFaultResponse::OutOfMemory;
-        return PageFaultResponse::Continue;
-    }
-
     auto current_thread = Thread::current();
     if (current_thread != nullptr)
         current_thread->did_zero_fault();
 
-    if (page_slot->is_lazy_committed_page()) {
+    RefPtr<PhysicalPage> new_physical_page;
+
+    if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
         VERIFY(m_vmobject->is_anonymous());
-        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
-        dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
+        new_physical_page = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
+        dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", new_physical_page->paddr());
     } else {
         auto page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (page_or_error.is_error()) {
             dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
             return PageFaultResponse::OutOfMemory;
         }
-        page_slot = page_or_error.release_value();
-        dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", page_slot->paddr());
+        new_physical_page = page_or_error.release_value();
+        dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", new_physical_page->paddr());
+    }
+
+    bool already_handled = false;
+
+    {
+        SpinlockLocker locker(vmobject().m_lock);
+        auto& page_slot = physical_page_slot(page_index_in_region);
+        already_handled = !page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page();
+        if (already_handled) {
+            // Someone else already faulted in a new page in this slot. That's fine, we'll just remap with their page.
+            new_physical_page = page_slot;
+        } else {
+            // Install the newly allocated page into the VMObject.
+            page_slot = new_physical_page;
+        }
     }
 
-    if (!remap_vmobject_page(page_index_in_vmobject, *page_slot)) {
-        dmesgln("MM: handle_zero_fault was unable to allocate a page table to map {}", page_slot);
+    if (!remap_vmobject_page(page_index_in_vmobject, *new_physical_page)) {
+        dmesgln("MM: handle_zero_fault was unable to allocate a page table to map {}", new_physical_page);
         return PageFaultResponse::OutOfMemory;
     }
     return PageFaultResponse::Continue;
 }
 
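The locker scope above accepts a benign race: if another CPU zero-faults the same page between our allocation and taking m_lock, already_handled becomes true, new_physical_page is replaced by the winner's page, and the page we allocated is freed when the local RefPtr drops its last reference. Both threads then remap the same physical page; an occasionally wasted allocation is the price of not holding the VMObject spinlock across page allocation. Exercising the earlier PageArray sketch (from above; not SerenityOS code) with two racing threads demonstrates the same invariant:

#include <cassert>
#include <thread>

int main()
{
    PageArray pages(1);
    std::shared_ptr<Page> a, b;
    std::thread t1([&] { a = pages.ensure_page(0); });
    std::thread t2([&] { b = pages.ensure_page(0); });
    t1.join();
    t2.join();
    assert(a == b); // Exactly one allocation "wins" the slot; the loser is dropped.
    return 0;
}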
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index 5c3ba8c6fb..ade7bc9c76 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -211,7 +211,7 @@ private:
 
     [[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index);
     [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index);
-    [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index);
+    [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
 
     [[nodiscard]] bool map_individual_page_impl(size_t page_index);
     [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
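A note on the signature change: page_in_slot_at_time_of_fault carries the page the caller already observed in the slot, which lets handle_zero_fault() choose between allocate_committed_page() and a regular zero-filled allocation up front, without first taking the VMObject lock just to re-read the slot.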