diff options
author | Andreas Kling <kling@serenityos.org> | 2022-08-18 16:09:58 +0200 |
---|---|---|
committer | Andreas Kling <kling@serenityos.org> | 2022-08-18 18:52:34 +0200 |
commit | abb84b9fcdf8b8d992967191ac1597aaa653c29d (patch) | |
tree | 0349dfb92871cd70314b322504703195f82f4048 /Kernel/Heap | |
parent | 27c1135d307efde8d9baef2affb26be568d50263 (diff) | |
download | serenity-abb84b9fcdf8b8d992967191ac1597aaa653c29d.zip |
Kernel: Fix inconsistent lock acquisition order in kmalloc
We always want to grab the page directory lock before the MM lock.
This fixes a deadlock I encountered when building DOOM with make -j4.
Diffstat (limited to 'Kernel/Heap')
-rw-r--r-- | Kernel/Heap/kmalloc.cpp | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 5e4f2ceff3..22681b985f 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -332,8 +332,8 @@ struct KmallocGlobalData {
     auto cpu_supports_nx = Processor::current().has_nx();

-    SpinlockLocker mm_locker(Memory::s_mm_lock);
     SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+    SpinlockLocker mm_locker(Memory::s_mm_lock);

     for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
         // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
@@ -365,8 +365,8 @@ struct KmallocGlobalData {
         // Make sure the entire kmalloc VM range is backed by page tables.
         // This avoids having to deal with lazy page table allocation during heap expansion.
-        SpinlockLocker mm_locker(Memory::s_mm_lock);
         SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+        SpinlockLocker mm_locker(Memory::s_mm_lock);
         for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
             MM.ensure_pte(MM.kernel_page_directory(), vaddr);
         }