author     Andreas Kling <kling@serenityos.org>  2022-01-12 14:32:21 +0100
committer  Andreas Kling <kling@serenityos.org>  2022-01-12 14:52:47 +0100
commit     d8206c105947d0dd12e7431b9d35509f8e62735a (patch)
tree       2d8fa70b6bff60c4464e5de27a28984caa074f81 /Kernel/Memory
parent     2323cdd91419ba2bad419f62acdbbca259f6bd45 (diff)
download   serenity-d8206c105947d0dd12e7431b9d35509f8e62735a.zip
Kernel: Don't release/relock spinlocks repeatedly during space teardown
Grab the page directory and MM locks once at the start of address space teardown, then hold onto them across all the region unmapping work.
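
The idea, as a minimal userspace sketch (hypothetical types: std::recursive_mutex stands in for the kernel's RecursiveSpinlock, and the Region/teardown shapes are simplified stand-ins, not the real SerenityOS API):

#include <mutex>
#include <vector>

struct Region {
    // Before: every region acquired and released both locks itself,
    // so tearing down N regions meant N acquire/release round trips
    // on each lock.
    void unmap(std::recursive_mutex& pd_lock, std::recursive_mutex& mm_lock)
    {
        std::scoped_lock locker(pd_lock, mm_lock);
        // ... unmap this region's pages ...
    }

    // After: the caller passes its lock guards as evidence that both
    // locks are already held; this function never touches the locks.
    void unmap_with_locks_held(std::unique_lock<std::recursive_mutex>&,
                               std::unique_lock<std::recursive_mutex>&)
    {
        // ... unmap this region's pages ...
    }
};

void remove_all_regions(std::vector<Region>& regions,
                        std::recursive_mutex& pd_lock,
                        std::recursive_mutex& mm_lock)
{
    std::unique_lock pd_locker(pd_lock); // take both locks once...
    std::unique_lock mm_locker(mm_lock);
    for (auto& region : regions)         // ...and hold them for the whole loop
        region.unmap_with_locks_held(pd_locker, mm_locker);
    regions.clear();
}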
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--  Kernel/Memory/AddressSpace.cpp   6
-rw-r--r--  Kernel/Memory/Region.cpp        13
-rw-r--r--  Kernel/Memory/Region.h           1
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index dbda1324e1..9be6e46914 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -321,9 +321,11 @@ void AddressSpace::dump_regions()
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
     VERIFY(Thread::current() == g_finalizer);
-    SpinlockLocker lock(m_lock);
+    SpinlockLocker locker(m_lock);
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
     for (auto& region : m_regions)
-        (*region).unmap(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No);
+        (*region).unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
     m_regions.clear();
 }
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 3c3a2fd83f..76e31d7406 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -234,12 +234,19 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
 
-void Region::unmap(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb)
+void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldFlushTLB should_flush_tlb)
+{
+    if (!m_page_directory)
+        return;
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
+    unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
+}
+
+void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
-    SpinlockLocker page_lock(m_page_directory->get_lock());
-    SpinlockLocker lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index 5ca89bfd53..b66300c241 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -179,6 +179,7 @@ public:
         Yes,
     };
     void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
+    void unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
     void remap();
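
Note on the design, as visible in the diff above: the two SpinlockLocker<RecursiveSpinlock>& parameters of unmap_with_locks_held() are left unnamed in the Region.cpp definition because the body never uses them. They exist purely as call-site evidence that the caller already holds the page directory lock and s_mm_lock, which is what lets the loop in AddressSpace::remove_all_regions() unmap every region without per-region lock release/relock churn.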