diff options
author | Idan Horowitz <idan.horowitz@gmail.com> | 2022-01-28 16:36:53 +0200 |
---|---|---|
committer | Idan Horowitz <idan.horowitz@gmail.com> | 2022-01-28 19:05:52 +0200 |
commit | 5146315a150b9c71382eadc0e229ee30f38305b5 (patch) | |
tree | c540163bfba228c03411081f6b1bb916a12c7f8e /Kernel/Memory/MemoryManager.cpp | |
parent | bd5b56cab0abbf4272fe2e006413f324505f552e (diff) | |
download | serenity-5146315a150b9c71382eadc0e229ee30f38305b5.zip |
Kernel: Convert MemoryManager::allocate_user_physical_page to ErrorOr
This allows us to use the TRY macro at the call sites, instead of using
clunky null checks.
Diffstat (limited to 'Kernel/Memory/MemoryManager.cpp')
-rw-r--r-- | Kernel/Memory/MemoryManager.cpp | 11 |
1 file changed, 6 insertions, 5 deletions
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp index e1575c7f00..07eb097970 100644 --- a/Kernel/Memory/MemoryManager.cpp +++ b/Kernel/Memory/MemoryManager.cpp @@ -572,11 +572,12 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index]; bool did_purge = false; - auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge); - if (!page_table) { + auto page_table_or_error = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge); + if (page_table_or_error.is_error()) { dbgln("MM: Unable to allocate page table to map {}", vaddr); return nullptr; } + auto page_table = page_table_or_error.release_value(); if (did_purge) { // If any memory had to be purged, ensure_pte may have been called as part // of the purging process. So we need to re-map the pd in this case to ensure @@ -892,7 +893,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page return page.release_nonnull(); } -RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge) +ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge) { SpinlockLocker lock(s_mm_lock); auto page = find_free_user_physical_page(false); @@ -918,7 +919,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s }); if (!page) { dmesgln("MM: no user physical pages available"); - return {}; + return ENOMEM; } } @@ -930,7 +931,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s if (did_purge) *did_purge = purged_pages; - return page; + return page.release_nonnull(); } ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size) |