Diffstat (limited to 'Kernel/Heap/kmalloc.cpp')
-rw-r--r--  Kernel/Heap/kmalloc.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 5e4f2ceff3..22681b985f 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -332,8 +332,8 @@ struct KmallocGlobalData {
 
         auto cpu_supports_nx = Processor::current().has_nx();
 
-        SpinlockLocker mm_locker(Memory::s_mm_lock);
         SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+        SpinlockLocker mm_locker(Memory::s_mm_lock);
 
         for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
             // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
@@ -365,8 +365,8 @@ struct KmallocGlobalData {
 
         // Make sure the entire kmalloc VM range is backed by page tables.
         // This avoids having to deal with lazy page table allocation during heap expansion.
-        SpinlockLocker mm_locker(Memory::s_mm_lock);
         SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+        SpinlockLocker mm_locker(Memory::s_mm_lock);
         for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
             MM.ensure_pte(MM.kernel_page_directory(), vaddr);
         }
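
Note: both hunks make the same change: they swap the acquisition order of Memory::s_mm_lock and the kernel page directory lock so that every path takes the page directory lock first. Taking two locks in different orders on different paths is the classic ABBA deadlock recipe. Below is a minimal userspace sketch of the hazard this reordering appears to address; it uses std::mutex as a stand-in for the kernel's Spinlock, and all function names are hypothetical, not SerenityOS code.

// Minimal userspace sketch (not SerenityOS code). std::mutex stands in for
// the kernel's Spinlock; the lock names mirror the two locks in the diff.
#include <mutex>

std::mutex pd_lock; // stands in for MM.kernel_page_directory().get_lock()
std::mutex mm_lock; // stands in for Memory::s_mm_lock

// Before the fix: this path took mm_lock first, then pd_lock...
void expand_before_fix()
{
    std::lock_guard mm(mm_lock);
    std::lock_guard pd(pd_lock);
    // ... map pages into the heap ...
}

// ...while another path takes pd_lock first, then mm_lock. If the two run
// concurrently, each can hold one lock while waiting forever on the other:
// an ABBA deadlock.
void other_path()
{
    std::lock_guard pd(pd_lock);
    std::lock_guard mm(mm_lock);
    // ... touch page tables ...
}

// After the fix: every path acquires pd_lock before mm_lock, so a circular
// wait between the two locks can no longer form.
void expand_after_fix()
{
    std::lock_guard pd(pd_lock);
    std::lock_guard mm(mm_lock);
    // ... map pages into the heap ...
}

In ordinary userspace C++ the same guarantee could also be had with std::scoped_lock(pd_lock, mm_lock), which acquires multiple mutexes deadlock-free; kernel spinlock code instead typically relies on a fixed global acquisition order, which is what this commit enforces.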