author     Andreas Kling <kling@serenityos.org>    2022-08-23 12:38:17 +0200
committer  Andreas Kling <kling@serenityos.org>    2022-08-24 14:57:51 +0200
commit     d6ef18f587d4a7e4f58487c84e0b9eb260f3ec5a
tree       c6627f629b245831df710500b05ccd274f7fa911
parent     dc9d2c1b10999a177f27b0f0f8c3ee6df3a61dad
Kernel: Don't hog the MM lock while unmapping regions
We were holding the MM lock across all of the region unmapping code.
This was previously necessary since the quickmaps used during unmapping
required holding the MM lock.
Now that it's no longer necessary, we can leave the MM lock alone here.
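The shape of the change is easiest to see as a before/after of the RAII locking scopes around a region unmap. The sketch below is a standalone illustration, not the kernel code itself (which appears in the diff further down): it uses std::recursive_mutex as a hypothetical stand-in for the kernel's RecursiveSpinlock/SpinlockLocker pair.

```cpp
#include <mutex>

// Hypothetical stand-ins for the kernel's RecursiveSpinlock and SpinlockLocker
// (illustration only; the real types live in Kernel/Locking/Spinlock.h).
using RecursiveSpinlock = std::recursive_mutex;
using SpinlockLocker = std::scoped_lock<std::recursive_mutex>;

RecursiveSpinlock g_page_directory_lock;
RecursiveSpinlock s_mm_lock; // the global MM lock this commit stops taking here

// Before: both locks were held across the entire unmap.
void unmap_region_before()
{
    SpinlockLocker pd_locker(g_page_directory_lock);
    SpinlockLocker mm_locker(s_mm_lock); // held for the whole page walk
    // ... walk the region's pages and clear each PTE ...
}

// After: only the page-directory lock spans the unmap; the quickmaps used
// during unmapping no longer require the caller to hold s_mm_lock.
void unmap_region_after()
{
    SpinlockLocker pd_locker(g_page_directory_lock);
    // ... walk the region's pages and clear each PTE ...
}
```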
-rw-r--r--  Kernel/Memory/AddressSpace.cpp | 3 +--
-rw-r--r--  Kernel/Memory/Region.cpp       | 8 +++-----
-rw-r--r--  Kernel/Memory/Region.h         | 2 +-
3 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index f215e572cd..3399c48f14 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -341,10 +341,9 @@ void AddressSpace::remove_all_regions(Badge<Process>)
     VERIFY(Thread::current() == g_finalizer);
     {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
-        SpinlockLocker mm_locker(s_mm_lock);
         m_region_tree.with([&](auto& region_tree) {
             for (auto& region : region_tree.regions())
-                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
+                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
         });
     }
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 6bc31e87a6..883e813867 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -70,8 +70,7 @@ Region::~Region()
         if (!is_readable() && !is_writable() && !is_executable()) {
             // If the region is "PROT_NONE", we didn't map it in the first place.
         } else {
-            SpinlockLocker mm_locker(s_mm_lock);
-            unmap_with_locks_held(ShouldFlushTLB::Yes, pd_locker, mm_locker);
+            unmap_with_locks_held(ShouldFlushTLB::Yes, pd_locker);
             VERIFY(!m_page_directory);
         }
     }
@@ -268,11 +267,10 @@ void Region::unmap(ShouldFlushTLB should_flush_tlb)
     if (!m_page_directory)
         return;
     SpinlockLocker pd_locker(m_page_directory->get_lock());
-    SpinlockLocker mm_locker(s_mm_lock);
-    unmap_with_locks_held(should_flush_tlb, pd_locker, mm_locker);
+    unmap_with_locks_held(should_flush_tlb, pd_locker);
 }
 
-void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
+void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index fbbf61ea5f..dfe660d1c0 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -183,7 +183,7 @@ public:
     void set_page_directory(PageDirectory&);
     ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
     void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes);
-    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
+    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker);
     void remap();