author    Andreas Kling <kling@serenityos.org>    2021-01-27 21:01:45 +0100
committer Andreas Kling <kling@serenityos.org>    2021-01-27 21:14:42 +0100
commit    e67402c7027f13fcc03d61f88725470e4c66e820 (patch)
tree      673d40ced3c4dec8da64200ee21c4c8398d6fcb6 /Kernel/VM/MemoryManager.cpp
parent    67bc5e0bbdcf313b20b32069a5e9b9806f8bd790 (diff)
download  serenity-e67402c7027f13fcc03d61f88725470e4c66e820.zip
Kernel: Remove Range "valid" state and use Optional<Range> instead
It's easier to understand VM ranges if they are always valid. We can
simply use an empty Optional<Range> to encode absence when needed.
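
To illustrate the pattern outside the kernel, here is a minimal standalone sketch, with std::optional standing in for AK::Optional; the Range struct and allocate_anywhere() below are simplified stand-ins for illustration, not the kernel's actual types:

// Sketch only: std::optional mimics AK::Optional here; Range is a
// simplified stand-in for the kernel's Range class.
#include <cstddef>
#include <cstdio>
#include <optional>

struct Range {
    size_t base { 0 };
    size_t size { 0 };
};

// Previously the allocator returned a Range that could be "invalid" and
// every caller had to remember to check range.is_valid(). Returning
// Optional<Range> encodes absence in the type itself.
std::optional<Range> allocate_anywhere(size_t size, size_t available)
{
    if (size == 0 || size > available)
        return std::nullopt; // nothing to hand out; the caller sees an empty Optional
    return Range { 0x1000, size };
}

int main()
{
    auto range = allocate_anywhere(0x4000, 0x10000);
    if (!range.has_value()) // same guard shape as the call sites in the diff below
        return 1;
    std::printf("base=%#zx size=%#zx\n", range->base, range->size);
    return 0;
}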
Diffstat (limited to 'Kernel/VM/MemoryManager.cpp')
 Kernel/VM/MemoryManager.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 7397ccd085..9642360e43 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -399,10 +399,10 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = ContiguousVMObject::create_with_size(size);
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable)
@@ -410,12 +410,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject.release_nonnull(), name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -423,12 +423,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -436,12 +436,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
@@ -467,9 +467,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 bool MemoryManager::commit_user_physical_pages(size_t page_count)
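
Note that every converted call site only unwraps with range.value() after the has_value() guard has already returned early, so an empty Optional is never dereferenced. The allocator side of the change (allocate_anywhere() and allocate_specific() now apparently returning Optional<Range>) is not shown here, since this diff is limited to Kernel/VM/MemoryManager.cpp.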