author    Idan Horowitz <idan.horowitz@gmail.com>    2022-01-28 16:36:53 +0200
committer Idan Horowitz <idan.horowitz@gmail.com>    2022-01-28 19:05:52 +0200
commit    5146315a150b9c71382eadc0e229ee30f38305b5
tree      c540163bfba228c03411081f6b1bb916a12c7f8e /Kernel/Memory
parent    bd5b56cab0abbf4272fe2e006413f324505f552e
Kernel: Convert MemoryManager::allocate_user_physical_page to ErrorOr
This allows us to use the TRY macro at the call sites, instead of writing clunky null checks.
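
For readers unfamiliar with the idiom: ErrorOr<T> holds either a value or an Error, and the TRY() macro unwraps the value or returns the Error to the caller. The call-site change therefore looks roughly like this (a sketch assembled from the hunks below, not code taken from the commit itself):

    // Before: failure is a null RefPtr that every caller must remember to check.
    RefPtr<PhysicalPage> page = MM.allocate_user_physical_page();
    if (!page)
        return ENOMEM;

    // After: failure is an Error inside ErrorOr, and TRY() propagates it.
    auto page = TRY(MM.allocate_user_physical_page());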
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--   Kernel/Memory/AnonymousVMObject.cpp    5
-rw-r--r--   Kernel/Memory/MemoryManager.cpp       11
-rw-r--r--   Kernel/Memory/MemoryManager.h          2
-rw-r--r--   Kernel/Memory/PageDirectory.cpp       12
-rw-r--r--   Kernel/Memory/Region.cpp              11
5 files changed, 19 insertions(+), 22 deletions(-)
diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp
index fe7471bcfc..5f7d3b6fca 100644
--- a/Kernel/Memory/AnonymousVMObject.cpp
+++ b/Kernel/Memory/AnonymousVMObject.cpp
@@ -322,11 +322,12 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         page = m_shared_committed_cow_pages->take_one();
     } else {
         dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page and it's time to COW!");
-        page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-        if (page.is_null()) {
+        auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+        if (page_or_error.is_error()) {
             dmesgln("MM: handle_cow_fault was unable to allocate a physical page");
             return PageFaultResponse::OutOfMemory;
         }
+        page = page_or_error.release_value();
     }
 
     dbgln_if(PAGE_FAULT_DEBUG, " >> COW {} <- {}", page->paddr(), page_slot->paddr())
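
Note that handle_cow_fault cannot use TRY() here: TRY() propagates failure by returning an Error, but this function returns a PageFaultResponse. The hunk therefore unwraps the ErrorOr by hand and maps allocation failure to PageFaultResponse::OutOfMemory. Stripped of the surrounding code, the shape is roughly as follows (example_fault_handler is a hypothetical minimal function for illustration):

    PageFaultResponse example_fault_handler()
    {
        auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
        if (page_or_error.is_error())
            return PageFaultResponse::OutOfMemory; // can't TRY(): wrong return type
        auto page = page_or_error.release_value(); // NonnullRefPtr<PhysicalPage>
        // ... map the page, etc. ...
        return PageFaultResponse::Continue;
    }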
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index e1575c7f00..07eb097970 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -572,11 +572,12 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
         return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
 
     bool did_purge = false;
-    auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
-    if (!page_table) {
+    auto page_table_or_error = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
+    if (page_table_or_error.is_error()) {
         dbgln("MM: Unable to allocate page table to map {}", vaddr);
         return nullptr;
     }
+    auto page_table = page_table_or_error.release_value();
     if (did_purge) {
         // If any memory had to be purged, ensure_pte may have been called as part
         // of the purging process. So we need to re-map the pd in this case to ensure
@@ -892,7 +893,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page
     return page.release_nonnull();
 }
 
-RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
     SpinlockLocker lock(s_mm_lock);
     auto page = find_free_user_physical_page(false);
@@ -918,7 +919,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
         });
         if (!page) {
             dmesgln("MM: no user physical pages available");
-            return {};
+            return ENOMEM;
         }
     }
 
@@ -930,7 +931,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
 
     if (did_purge)
         *did_purge = purged_pages;
-    return page;
+    return page.release_nonnull();
 }
 
 ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
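
The two changed return statements rely on ErrorOr's conversions: "return ENOMEM;" constructs an Error from the errno code, and page.release_nonnull() converts the RefPtr (known non-null at this point) into the NonnullRefPtr promised by the new return type. For reference, TRY() itself is roughly a GNU statement expression along these lines (simplified; the real definition lives in AK/Try.h):

    #define TRY(expression)                               \
        ({                                                \
            auto _temporary_result = (expression);        \
            if (_temporary_result.is_error())             \
                return _temporary_result.release_error(); \
            _temporary_result.release_value();            \
        })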
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 0c1e454fd5..d9754bc12a 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -171,7 +171,7 @@ public:
     void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
 
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+    ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_supervisor_physical_page();
     ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
diff --git a/Kernel/Memory/PageDirectory.cpp b/Kernel/Memory/PageDirectory.cpp
index 1398343b23..aa59b8c568 100644
--- a/Kernel/Memory/PageDirectory.cpp
+++ b/Kernel/Memory/PageDirectory.cpp
@@ -61,19 +61,13 @@ ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace(Vi
     SpinlockLocker lock(s_mm_lock);
 
 #if ARCH(X86_64)
-    directory->m_pml4t = MM.allocate_user_physical_page();
-    if (!directory->m_pml4t)
-        return ENOMEM;
+    directory->m_pml4t = TRY(MM.allocate_user_physical_page());
 #endif
 
-    directory->m_directory_table = MM.allocate_user_physical_page();
-    if (!directory->m_directory_table)
-        return ENOMEM;
+    directory->m_directory_table = TRY(MM.allocate_user_physical_page());
     auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
     for (size_t i = 0; i < kernel_pd_index; i++) {
-        directory->m_directory_pages[i] = MM.allocate_user_physical_page();
-        if (!directory->m_directory_pages[i])
-            return ENOMEM;
+        directory->m_directory_pages[i] = TRY(MM.allocate_user_physical_page());
     }
 
     // Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
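
Since try_create_for_userspace already returns ErrorOr<NonnullRefPtr<PageDirectory>>, each TRY() above forwards an allocation failure (ENOMEM) straight to the caller, collapsing three check-and-return blocks into single assignments. The loop body, for instance, expands to roughly:

    // Rough expansion of the TRY() inside the loop:
    auto result = MM.allocate_user_physical_page();
    if (result.is_error())
        return result.release_error(); // propagates ENOMEM upward
    directory->m_directory_pages[i] = result.release_value();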
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index d3323fbc99..c551cba7f8 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -407,11 +407,12 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
         page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
         dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
     } else {
-        page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
-        if (page_slot.is_null()) {
+        auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+        if (page_or_error.is_error()) {
             dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
             return PageFaultResponse::OutOfMemory;
         }
+        page_slot = page_or_error.release_value();
         dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", page_slot->paddr());
     }
 
@@ -495,12 +496,12 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         return PageFaultResponse::Continue;
     }
 
-    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-
-    if (vmobject_physical_page_entry.is_null()) {
+    auto vmobject_physical_page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+    if (vmobject_physical_page_or_error.is_error()) {
         dmesgln("MM: handle_inode_fault was unable to allocate a physical page");
         return PageFaultResponse::OutOfMemory;
     }
+    vmobject_physical_page_entry = vmobject_physical_page_or_error.release_value();
 
     {
         SpinlockLocker mm_locker(s_mm_lock);