| author | Brian Gianforcaro <bgianf@serenityos.org> | 2021-05-18 01:45:05 -0700 |
| committer | Andreas Kling <kling@serenityos.org> | 2021-05-20 08:10:07 +0200 |
| commit | 1415b2cfc300db1128fce1fa98e3e10a49810610 (patch) | |
| tree | c5a26973833fcfab514baf5f8fe7f555ca3a83cb /Kernel | |
| parent | a43bccfc3ce4818e27d42b756fd671ed6326e8be (diff) | |
| download | serenity-1415b2cfc300db1128fce1fa98e3e10a49810610.zip | |
Kernel: Do not allocate AnonymousVMObjects under a spinlock
Spinlocks guard short critical sections, ideally with no other locks being
taken while they are held. Violating that constraint usually has detrimental
effects on platform stability as well as on performance and scalability.
Allocating memory takes its own locks, and can in some cases even allocate
new regions, and thus violates these tenets.
Move the AnonymousVMObject creation outside of the spinlock, since creating
the object does not modify any shared state.
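
To make the shape of the fix concrete, here is a minimal, self-contained C++ sketch of the same pattern. All names here are hypothetical stand-ins, not the kernel's types: the toy SpinLock stands in for s_mm_lock, and register_region for the allocate_kernel_region helpers. The point is that the fallible allocation happens before the lock is acquired, so the heap's own locks never nest inside the spinlock's critical section.

    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <memory>

    // Toy spinlock standing in for the kernel's s_mm_lock (hypothetical).
    class SpinLock {
    public:
        void lock() { while (m_flag.test_and_set(std::memory_order_acquire)) {} }
        void unlock() { m_flag.clear(std::memory_order_release); }
    private:
        std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    };

    struct Region {
        int id;
    };

    SpinLock g_lock;
    std::array<std::unique_ptr<Region>, 16> g_regions; // shared state guarded by g_lock
    size_t g_region_count = 0;

    bool register_region(int id)
    {
        // Allocate *before* taking the spinlock: operator new takes the heap's
        // own locks, and nesting those under a spinlock is exactly the hazard
        // this commit removes.
        auto region = std::make_unique<Region>(Region { id });

        g_lock.lock();
        bool inserted = g_region_count < g_regions.size();
        if (inserted)
            g_regions[g_region_count++] = std::move(region); // no allocation while locked
        g_lock.unlock();

        // If the table was full, `region` is destroyed here, after unlock, so
        // the deallocation's locks also stay outside the critical section.
        return inserted;
    }

    int main()
    {
        return register_region(1) ? 0 : 1;
    }

Note the ordering on the failure path as well: because the smart pointer outlives the lock scope, both allocation and deallocation occur with the spinlock released, keeping the critical section down to a handful of pointer writes.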
Diffstat (limited to 'Kernel')
| -rw-r--r-- | Kernel/VM/MemoryManager.cpp | 24 |
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 00dfdae1ae..d278cc8159 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -470,40 +470,40 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
+    auto vm_object = AnonymousVMObject::create_with_size(size, strategy);
+    if (!vm_object)
+        return {};
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
-    if (!vmobject)
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), move(name), access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vm_object.release_nonnull(), move(name), access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
+    auto vm_object = AnonymousVMObject::create_for_physical_range(paddr, size);
+    if (!vm_object)
+        return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
-    if (!vmobject)
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, move(name), access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
+    auto vm_object = AnonymousVMObject::create_for_physical_range(paddr, size);
+    if (!vm_object)
+        return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
-    if (!vmobject)
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, move(name), access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, String name, Region::Access access, Region::Cacheable cacheable)
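
For reference, a caller of the reworked helper looks roughly like this. This is a hedged sketch based only on the signatures visible in the diff above: `MM`, the region name, and the flag values are assumptions drawn from SerenityOS conventions of this era, not from this commit.

    // Hypothetical call site; MM is assumed to be the MemoryManager accessor.
    auto region = MM.allocate_kernel_region(PAGE_SIZE, "Example Buffer",
        Region::Access::Read | Region::Access::Write,
        AllocationStrategy::Reserve, Region::Cacheable::Yes);
    if (!region) {
        // Failure can come from AnonymousVMObject creation (now performed
        // outside the lock) or from the range allocator (still under s_mm_lock).
        return {};
    }

Callers are unaffected by the change: the helpers still return an empty OwnPtr on failure, only the point at which the VM object is allocated has moved ahead of the spinlock acquisition.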