| author | Andreas Kling <kling@serenityos.org> | 2021-08-09 01:26:02 +0200 |
|---|---|---|
| committer | Andreas Kling <kling@serenityos.org> | 2021-08-09 11:46:31 +0200 |
| commit | 00bbbdeda6df2f47d4313a262fa897d9491e0581 | |
| tree | d29cd667a660a843134091c3ba367e02882874b8 | /Kernel/Memory |
| parent | d21b8f9013bbcc84b6483d2ff014e66d74dfdb7c | |
Kernel/SMP: Always take PageDirectory lock before the MemoryManager lock
This prevents deadlocks caused by inconsistent lock acquisition order.
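
The deadlock being fixed is a classic lock-order inversion (ABBA): one CPU takes `s_mm_lock` and then blocks on a `PageDirectory` lock, while another CPU already holds that `PageDirectory` lock and blocks on `s_mm_lock`, so neither can make progress. Below is a minimal user-space sketch of the hazard and the fix; `std::mutex` stands in for the kernel's `SpinLock`/`ScopedSpinLock`, and the function names are illustrative, not SerenityOS API:

```cpp
#include <mutex>
#include <thread>

std::mutex page_directory_lock; // stand-in for PageDirectory::get_lock()
std::mutex mm_lock;             // stand-in for MemoryManager's s_mm_lock

// Deadlock-prone ordering (what some paths did before this commit):
// run concurrently with map_pages() below, each thread can end up
// holding its first lock while waiting forever for the other's.
void update_region_broken()
{
    std::lock_guard<std::mutex> mm(mm_lock);
    std::lock_guard<std::mutex> page(page_directory_lock);
    // ... walk/modify page tables ...
}

// The order this commit standardizes on: PageDirectory lock first,
// then the MemoryManager lock.
void map_pages()
{
    std::lock_guard<std::mutex> page(page_directory_lock);
    std::lock_guard<std::mutex> mm(mm_lock);
    // ... walk/modify page tables ...
}

void update_region_fixed()
{
    // Same order as map_pages(), so a circular wait can no longer form.
    std::lock_guard<std::mutex> page(page_directory_lock);
    std::lock_guard<std::mutex> mm(mm_lock);
    // ... walk/modify page tables ...
}

int main()
{
    std::thread a(update_region_fixed);
    std::thread b(map_pages);
    a.join();
    b.join();
}
```

Establishing one global order and rewriting every path that had it backwards is exactly what the diff below does; the call sites that only ever needed the kernel range allocator switch to taking just the kernel page directory's own lock.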
Diffstat (limited to 'Kernel/Memory')
| -rw-r--r-- | Kernel/Memory/MemoryManager.cpp | 15 |
|---|---|---|
| -rw-r--r-- | Kernel/Memory/Region.cpp | 4 |

2 files changed, 9 insertions, 10 deletions
```diff
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 0f03de94cd..1dc0aecd96 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
 
 UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 {
-    ScopedSpinLock mm_lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinLock mm_lock(s_mm_lock);
     // Disable writing to the .ro_after_init section
     for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 
 void MemoryManager::unmap_text_after_init()
 {
-    ScopedSpinLock mm_lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinLock mm_lock(s_mm_lock);
 
     auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
     auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
@@ -703,7 +703,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -721,7 +721,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
     auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
     if (!vm_object)
         return {};
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -734,7 +734,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     if (!vm_object)
         return {};
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -743,7 +743,6 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    ScopedSpinLock lock(s_mm_lock);
     auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
     if (region)
         region->map(kernel_page_directory());
@@ -753,7 +752,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -1109,8 +1108,8 @@ void MemoryManager::dump_kernel_regions()
 
 void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
 {
-    ScopedSpinLock lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinLock lock(s_mm_lock);
     auto* pte = ensure_pte(kernel_page_directory(), vaddr);
     VERIFY(pte);
     if (pte->is_writable() == writable)
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 3a328c2205..3318fa5c02 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -236,10 +236,10 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
 
 void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
 {
-    ScopedSpinLock lock(s_mm_lock);
     if (!m_page_directory)
         return;
     ScopedSpinLock page_lock(m_page_directory->get_lock());
+    ScopedSpinLock lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -261,8 +261,8 @@ void Region::set_page_directory(PageDirectory& page_directory)
 }
 
 bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
 {
-    ScopedSpinLock lock(s_mm_lock);
     ScopedSpinLock page_lock(page_directory.get_lock());
+    ScopedSpinLock lock(s_mm_lock);
     // FIXME: Find a better place for this sanity check(?)
     if (is_user() && !is_shared()) {
```
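
Two smaller changes ride along in the diff. The `VirtualRange`-taking overload of `allocate_kernel_region_with_vmobject()` drops its lock acquisition outright rather than swapping it, presumably because the caller already owns the range and `Region::map()` acquires both locks in the correct order itself. And `Region::unmap()` now performs its `m_page_directory` null check before taking any lock, since the page directory's lock cannot be taken until the page directory is known to exist.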