author     Idan Horowitz <idan.horowitz@gmail.com>  2022-01-28 16:36:53 +0200
committer  Idan Horowitz <idan.horowitz@gmail.com>  2022-01-28 19:05:52 +0200
commit     5146315a150b9c71382eadc0e229ee30f38305b5
tree       c540163bfba228c03411081f6b1bb916a12c7f8e /Kernel/Memory/Region.cpp
parent     bd5b56cab0abbf4272fe2e006413f324505f552e
download   serenity-5146315a150b9c71382eadc0e229ee30f38305b5.zip
Kernel: Convert MemoryManager::allocate_user_physical_page to ErrorOr
This allows us to use the TRY macro at the call sites, instead of using clunky null checks.
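
For illustration, here is a minimal sketch of what a call site can look like once the allocator returns ErrorOr. The enclosing function, its name, and the exact value type carried by the ErrorOr are assumptions for the example, not part of this commit:

    // Hypothetical caller that itself returns ErrorOr, so an allocation
    // failure simply propagates to its caller via TRY. The value type
    // (NonnullRefPtr<PhysicalPage>) is assumed here for illustration.
    ErrorOr<void> allocate_and_report_one_page()
    {
        auto page = TRY(MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes));
        dbgln("Allocated user physical page at {}", page->paddr());
        return {};
    }

In the two page-fault handlers touched below, TRY cannot be used directly because they return PageFaultResponse rather than ErrorOr, so the patch checks is_error() and maps an allocation failure to PageFaultResponse::OutOfMemory.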
Diffstat (limited to 'Kernel/Memory/Region.cpp')
-rw-r--r--  Kernel/Memory/Region.cpp | 11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index d3323fbc99..c551cba7f8 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -407,11 +407,12 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
         page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
         dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
     } else {
-        page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
-        if (page_slot.is_null()) {
+        auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+        if (page_or_error.is_error()) {
             dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
             return PageFaultResponse::OutOfMemory;
         }
+        page_slot = page_or_error.release_value();
         dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", page_slot->paddr());
     }
 
@@ -495,12 +496,12 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         return PageFaultResponse::Continue;
     }
 
-    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-
-    if (vmobject_physical_page_entry.is_null()) {
+    auto vmobject_physical_page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+    if (vmobject_physical_page_or_error.is_error()) {
         dmesgln("MM: handle_inode_fault was unable to allocate a physical page");
         return PageFaultResponse::OutOfMemory;
     }
+    vmobject_physical_page_entry = vmobject_physical_page_or_error.release_value();
 
     {
         SpinlockLocker mm_locker(s_mm_lock);