diff options
author    Andreas Kling <kling@serenityos.org>   2022-08-22 14:56:26 +0200
committer Andreas Kling <kling@serenityos.org>   2022-08-22 17:56:03 +0200
commit    c8375c51fff9b614e2462c9e2cbbd228b618bf81 (patch)
tree      8339f0d70dcccd6b1ec02016670543cafd10caec /Kernel
parent    a838fdfd8894d40caa0ba7caa70ad4891ad8d8d7 (diff)
download  serenity-c8375c51fff9b614e2462c9e2cbbd228b618bf81.zip
Kernel: Stop taking MM lock while using PD/PT quickmaps
This is no longer required as these quickmaps are now per-CPU. :^)
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Heap/kmalloc.cpp         | 2
-rw-r--r--  Kernel/Memory/MemoryManager.cpp | 9
-rw-r--r--  Kernel/Memory/Region.cpp        | 1
3 files changed, 0 insertions, 12 deletions
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp index 5172d2ee13..5925566e9e 100644 --- a/Kernel/Heap/kmalloc.cpp +++ b/Kernel/Heap/kmalloc.cpp @@ -334,7 +334,6 @@ struct KmallocGlobalData { auto cpu_supports_nx = Processor::current().has_nx(); SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock()); - SpinlockLocker mm_locker(Memory::s_mm_lock); for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) { // FIXME: We currently leak physical memory when mapping it into the kmalloc heap. @@ -367,7 +366,6 @@ struct KmallocGlobalData { // Make sure the entire kmalloc VM range is backed by page tables. // This avoids having to deal with lazy page table allocation during heap expansion. SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock()); - SpinlockLocker mm_locker(Memory::s_mm_lock); for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) { MM.ensure_pte(MM.kernel_page_directory(), vaddr); } diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp index 7f6478bfe9..909698eed4 100644 --- a/Kernel/Memory/MemoryManager.cpp +++ b/Kernel/Memory/MemoryManager.cpp @@ -140,7 +140,6 @@ UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel() UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory() { SpinlockLocker page_lock(kernel_page_directory().get_lock()); - SpinlockLocker mm_lock(s_mm_lock); // Disable writing to the .ro_after_init section for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) { auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i)); @@ -152,7 +151,6 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory() void MemoryManager::unmap_text_after_init() { SpinlockLocker page_lock(kernel_page_directory().get_lock()); - SpinlockLocker mm_lock(s_mm_lock); auto start = 
page_round_down((FlatPtr)&start_of_unmap_after_init); auto end = page_round_up((FlatPtr)&end_of_unmap_after_init).release_value_but_fixme_should_propagate_errors(); @@ -169,7 +167,6 @@ void MemoryManager::unmap_text_after_init() UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init() { - SpinlockLocker mm_lock(s_mm_lock); SpinlockLocker page_lock(kernel_page_directory().get_lock()); auto start = page_round_down((FlatPtr)start_of_kernel_ksyms); @@ -543,7 +540,6 @@ PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr) { VERIFY_INTERRUPTS_DISABLED(); - VERIFY(s_mm_lock.is_locked_by_current_processor()); VERIFY(page_directory.get_lock().is_locked_by_current_processor()); u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff; u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff; @@ -560,7 +556,6 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr) { VERIFY_INTERRUPTS_DISABLED(); - VERIFY(s_mm_lock.is_locked_by_current_processor()); VERIFY(page_directory.get_lock().is_locked_by_current_processor()); u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff; u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff; @@ -602,7 +597,6 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release) { VERIFY_INTERRUPTS_DISABLED(); - VERIFY(s_mm_lock.is_locked_by_current_processor()); VERIFY(page_directory.get_lock().is_locked_by_current_processor()); u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff; u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff; @@ -1038,7 +1032,6 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddres 
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index) { VERIFY_INTERRUPTS_DISABLED(); - VERIFY(s_mm_lock.is_locked_by_current_processor()); VirtualAddress vaddr(KERNEL_QUICKMAP_PD_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE); size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE; @@ -1058,7 +1051,6 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr) { VERIFY_INTERRUPTS_DISABLED(); - VERIFY(s_mm_lock.is_locked_by_current_processor()); VirtualAddress vaddr(KERNEL_QUICKMAP_PT_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE); size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE; @@ -1162,7 +1154,6 @@ void MemoryManager::dump_kernel_regions() void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable) { SpinlockLocker page_lock(kernel_page_directory().get_lock()); - SpinlockLocker lock(s_mm_lock); auto* pte = ensure_pte(kernel_page_directory(), vaddr); VERIFY(pte); if (pte->is_writable() == writable) diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp index 6016a2c4fc..88867629b7 100644 --- a/Kernel/Memory/Region.cpp +++ b/Kernel/Memory/Region.cpp @@ -213,7 +213,6 @@ bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage PANIC("About to map mmap'ed page at a kernel address"); } - SpinlockLocker lock(s_mm_lock); auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr); if (!pte) return false; |