author     Andreas Kling <kling@serenityos.org>    2021-08-09 01:27:30 +0200
committer  Andreas Kling <kling@serenityos.org>    2021-08-09 11:46:31 +0200
commit     6283acb6b78343a9f819fccf6748c80e3ecc488c (patch)
tree       240a9e6ae7b5d606dfb774330f22079f0cb7075c /Kernel/Memory
parent     00bbbdeda6df2f47d4313a262fa897d9491e0581 (diff)
Kernel/SMP: Don't panic in inode fault if other CPU already paged it in
It may happen that CPU A pages in data from the same inode while CPU B is just entering the page fault handler for the same page. Handle this gracefully by checking whether the data has already been paged in (instead of VERIFY'ing that it hasn't), and remap the page if that's the case.
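
For context, the fix is a double-checked page-in: take the VMObject's lock, re-check whether another CPU already installed the physical page, and only fall through to the slow inode read if it is still missing. Below is a minimal userspace sketch of that pattern; the names (InodeVMObjectSketch, FaultResponse, handle_inode_fault's signature) are hypothetical stand-ins, and std::mutex is used in place of the kernel's SpinLock:

#include <mutex>
#include <optional>

// Hypothetical stand-ins for the kernel types involved.
struct PhysicalPage { };

struct InodeVMObjectSketch {
    std::mutex m_lock;                  // plays the role of the kernel SpinLock
    std::optional<PhysicalPage> m_page; // empty until the page has been read in
};

enum class FaultResponse { Continue, OutOfMemory };

FaultResponse handle_inode_fault(InodeVMObjectSketch& vmobject)
{
    {
        // Re-check under the lock: another CPU may have paged the data
        // in between the fault being raised and this handler running.
        std::scoped_lock locker(vmobject.m_lock);
        if (vmobject.m_page.has_value()) {
            // Someone else beat us to it; remap and carry on instead of
            // asserting that the entry is still null.
            return FaultResponse::Continue;
        }
    }
    // ... slow path: read the data from the inode, allocate a physical
    // page, and install it (not shown) ...
    return FaultResponse::Continue;
}

Note that the lock scope is deliberately narrow: it covers only the null check, so the potentially slow inode read below it never happens while the lock is held.
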
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--  Kernel/Memory/Region.cpp  |  11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 3318fa5c02..648b20ba8a 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -401,7 +401,16 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
-    VERIFY(vmobject_physical_page_entry.is_null());
+
+    {
+        ScopedSpinLock locker(inode_vmobject.m_lock);
+        if (!vmobject_physical_page_entry.is_null()) {
+            dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
+            if (!remap_vmobject_page(page_index_in_vmobject))
+                return PageFaultResponse::OutOfMemory;
+            return PageFaultResponse::Continue;
+        }
+    }
     dbgln_if(PAGE_FAULT_DEBUG, "Inode fault in {} page index: {}", name(), page_index_in_region);