author      Andreas Kling <kling@serenityos.org>   2022-08-18 19:20:33 +0200
committer   Andreas Kling <kling@serenityos.org>   2022-08-18 19:20:33 +0200
commit      4bc3745ce681e54c7f719fa5b98858c085b2f280 (patch)
tree        d8efab49e8ea3baa57726feb4f213aa686355487 /Kernel/Memory
parent      c3ad4ffcece4c3693185f0de82573d93db3c3895 (diff)
Kernel: Make Region's physical page accessors safer to use
Region::physical_page() now takes the VMObject lock while accessing the physical pages array, and returns a RefPtr<PhysicalPage>. This ensures that the array access is safe.

Region::physical_page_slot() now VERIFY()'s that the VMObject lock is held by the caller. Since we're returning a reference to the physical page slot in the VMObject's physical page array, this is the best we can do here.
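For readers following the diff below, here is a minimal standalone sketch of the two accessor shapes this commit settles on, using std::mutex and std::shared_ptr as stand-ins for the kernel's Spinlock and RefPtr (VMObjectSketch and all member names below are illustrative, not the SerenityOS API). Since std::mutex cannot report whether the current thread holds it, the slot accessor takes the caller's lock as an explicit witness, whereas the kernel checks is_locked_by_current_processor() directly.

// Sketch only: VMObjectSketch stands in for the VMObject side of things, and the
// lock/pointer types are standard-library substitutes for Spinlock/RefPtr.
#include <cassert>
#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

struct PhysicalPage { };

class VMObjectSketch {
public:
    // Safe accessor: locks internally and returns an owning pointer, so the
    // caller never reads the physical pages array without the lock held, and
    // the page stays alive even after the lock is dropped.
    std::shared_ptr<PhysicalPage> physical_page(size_t index) const
    {
        std::lock_guard locker(m_lock);
        assert(index < m_physical_pages.size());
        return m_physical_pages[index];
    }

    // Slot accessor: hands out a reference into the array, so the best it can
    // do is demand proof that the caller already holds the lock.
    std::shared_ptr<PhysicalPage>& physical_page_slot(std::unique_lock<std::mutex> const& locker, size_t index)
    {
        assert(locker.owns_lock() && locker.mutex() == &m_lock);
        assert(index < m_physical_pages.size());
        return m_physical_pages[index];
    }

    std::mutex& lock() const { return m_lock; }

    void append(std::shared_ptr<PhysicalPage> page) { m_physical_pages.push_back(std::move(page)); }

private:
    mutable std::mutex m_lock;
    std::vector<std::shared_ptr<PhysicalPage>> m_physical_pages;
};

int main()
{
    VMObjectSketch vmobject;
    vmobject.append(std::make_shared<PhysicalPage>());

    // Locks internally; the returned pointer remains valid after the lock drops.
    auto page = vmobject.physical_page(0);

    // Caller locks first, mirroring the SpinlockLocker taken before
    // physical_page_slot() in handle_fault()/handle_zero_fault() below.
    std::unique_lock locker(vmobject.lock());
    auto& slot = vmobject.physical_page_slot(locker, 0);
    (void)page;
    (void)slot;
    return 0;
}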
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--   Kernel/Memory/Region.cpp   15
-rw-r--r--   Kernel/Memory/Region.h      2
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 65efde41d2..4e6e3ce68b 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -164,7 +164,7 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto const* page = physical_page(i);
+        auto page = physical_page(i);
         if (page && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
             bytes += PAGE_SIZE;
     }
@@ -175,7 +175,7 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto const* page = physical_page(i);
+        auto page = physical_page(i);
         if (page && page->ref_count() > 1 && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
             bytes += PAGE_SIZE;
     }
@@ -367,6 +367,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
             return handle_inode_fault(page_index_in_region);
         }
 
+        SpinlockLocker vmobject_locker(vmobject().m_lock);
         auto& page_slot = physical_page_slot(page_index_in_region);
         if (page_slot->is_lazy_committed_page()) {
             auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
@@ -388,7 +389,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
     VERIFY(fault.type() == PageFault::Type::ProtectionViolation);
     if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
         dbgln_if(PAGE_FAULT_DEBUG, "PV(cow) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
-        auto* phys_page = physical_page(page_index_in_region);
+        auto phys_page = physical_page(page_index_in_region);
         if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
             dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
             return handle_zero_fault(page_index_in_region);
@@ -404,11 +405,11 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(vmobject().is_anonymous());
 
+    SpinlockLocker locker(vmobject().m_lock);
+
     auto& page_slot = physical_page_slot(page_index_in_region);
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
 
-    SpinlockLocker locker(vmobject().m_lock);
-
     if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
         dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
         if (!remap_vmobject_page(page_index_in_vmobject, *page_slot))
@@ -541,14 +542,16 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     return PageFaultResponse::Continue;
 }
 
-PhysicalPage const* Region::physical_page(size_t index) const
+RefPtr<PhysicalPage> Region::physical_page(size_t index) const
 {
+    SpinlockLocker vmobject_locker(vmobject().m_lock);
     VERIFY(index < page_count());
     return vmobject().physical_pages()[first_page_index() + index];
 }
 
 RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
 {
+    VERIFY(vmobject().m_lock.is_locked_by_current_processor());
     VERIFY(index < page_count());
     return vmobject().physical_pages()[first_page_index() + index];
 }
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index 0893b3e70e..5c3ba8c6fb 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -152,7 +152,7 @@ public:
         return size() / PAGE_SIZE;
     }
 
-    PhysicalPage const* physical_page(size_t index) const;
+    RefPtr<PhysicalPage> physical_page(size_t index) const;
     RefPtr<PhysicalPage>& physical_page_slot(size_t index);
 
     [[nodiscard]] size_t offset_in_vmobject() const