author    | Andreas Kling <kling@serenityos.org> | 2020-03-01 15:55:27 +0100
committer | Andreas Kling <kling@serenityos.org> | 2020-03-01 15:56:23 +0100
commit    | ecdd9a5bc667b455cf359170d7cf2e406c1bca8e (patch)
tree      | 1836c25c38e6771b7ac9c5fdcdab06fd4cfbfee1 /Kernel/VM
parent    | 5e0c4d689fc4fac69db5c7fd9e2f5e6a82b97b89 (diff)
download  | serenity-ecdd9a5bc667b455cf359170d7cf2e406c1bca8e.zip
Kernel: Reduce code duplication a little bit in Region allocation
This patch reduces the number of code paths that lead to the allocation
of a Region object. It's quite hard to follow the various ways in which
this can happen, so this is an effort to simplify.
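[Editor's note] The pattern the commit applies is a classic one: keep several convenience entry points, but funnel them all into a single helper that does the actual construction and mapping, so the create/map sequence and its failure handling live in exactly one place. Below is a minimal standalone sketch of that shape, not SerenityOS code: the types and helper names (Range, Region, allocate_region_with_range, ...) are simplified stand-ins for illustration only.

    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <optional>
    #include <string>

    struct Range {
        std::size_t base { 0 };
        std::size_t size { 0 };
        bool is_valid() const { return size != 0; }
    };

    struct Region {
        Range range;
        std::string name;
    };

    // Stand-in for the kernel's virtual range allocator.
    static std::optional<Range> allocate_range_anywhere(std::size_t size)
    {
        if (size == 0)
            return std::nullopt;
        static std::size_t next_base = 0x10000;
        Range range { next_base, size };
        next_base += size;
        return range;
    }

    // The single choke point: every allocation path below ends up here,
    // so the construction logic and its failure handling are written once.
    static std::unique_ptr<Region> allocate_region_with_range(const Range& range, std::string name)
    {
        if (!range.is_valid())
            return nullptr; // Report failure to the caller instead of asserting.
        return std::make_unique<Region>(Region { range, std::move(name) });
    }

    // Convenience entry point: picks a range, then delegates to the choke point.
    static std::unique_ptr<Region> allocate_region(std::size_t size, std::string name)
    {
        auto range = allocate_range_anywhere(size);
        if (!range)
            return nullptr;
        return allocate_region_with_range(*range, std::move(name));
    }

    int main()
    {
        auto region = allocate_region(0x4000, "demo");
        if (!region) {
            std::puts("allocation failed");
            return 1;
        }
        std::printf("allocated '%s' at base %#zx\n", region->name.c_str(), region->range.base);
        return 0;
    }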
Diffstat (limited to 'Kernel/VM')
-rw-r--r-- | Kernel/VM/MemoryManager.cpp | 42
-rw-r--r-- | Kernel/VM/MemoryManager.h   |  1
2 files changed, 21 insertions(+), 22 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 585310f81c..9e8a47a843 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -310,17 +310,14 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
 {
-    InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    ASSERT(range.is_valid());
+    if (!range.is_valid())
+        return nullptr;
     auto vmobject = AnonymousVMObject::create_with_size(size);
-    OwnPtr<Region> region;
-    if (user_accessible)
-        region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
-    else
-        region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
-    region->map(kernel_page_directory());
+    auto region = allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    if (!region)
+        return nullptr;
     if (should_commit)
         region->commit();
     return region;
@@ -328,20 +325,14 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
 {
-    InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    ASSERT(range.is_valid());
+    if (!range.is_valid())
+        return nullptr;
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return nullptr;
-    OwnPtr<Region> region;
-    if (user_accessible)
-        region = Region::create_user_accessible(range, vmobject.release_nonnull(), 0, name, access, cacheable);
-    else
-        region = Region::create_kernel_only(range, vmobject.release_nonnull(), 0, name, access, cacheable);
-    region->map(kernel_page_directory());
-    return region;
+    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
@@ -349,21 +340,28 @@ OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size
     return allocate_kernel_region(size, name, access, true, true, cacheable);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)
 {
     InterruptDisabler disabler;
-    ASSERT(!(size % PAGE_SIZE));
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    ASSERT(range.is_valid());
     OwnPtr<Region> region;
     if (user_accessible)
         region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
     else
         region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
-    region->map(kernel_page_directory());
+    if (region)
+        region->map(kernel_page_directory());
     return region;
 }
 
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+{
+    ASSERT(!(size % PAGE_SIZE));
+    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
+    if (!range.is_valid())
+        return nullptr;
+    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+}
+
 void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
 {
     for (auto& region : m_user_physical_regions) {
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 1c58d3aae0..0b7ca24f26 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -106,6 +106,7 @@ public:
     OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
     OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
 
     unsigned user_physical_pages() const { return m_user_physical_pages; }
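[Editor's note] A behavioral consequence worth calling out: both allocate_kernel_region() overloads now return nullptr when the kernel address space has no free range of the requested size, where they previously asserted. Callers are therefore expected to check the result. A hypothetical call site (the region name and error handling below are illustrative, not taken from this commit) might look like:

    auto region = MM.allocate_kernel_region(PAGE_SIZE * 4, "Example buffer", Region::Access::Read | Region::Access::Write);
    if (!region) {
        // No free kernel VM range of that size; propagate the
        // failure instead of taking the whole kernel down.
        return nullptr;
    }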