path: root/Kernel/Memory/MemoryManager.cpp
author     Andreas Kling <kling@serenityos.org>  2021-08-22 12:57:58 +0200
committer  Andreas Kling <kling@serenityos.org>  2021-08-23 00:02:09 +0200
commit     a930877f31fb117ce4b38d4782dd288093700d8a (patch)
tree       be22fc9f535db876433c71587e0e34602ebd378e /Kernel/Memory/MemoryManager.cpp
parent     1b9916439fc93395386d160e25858b300444c4db (diff)
download   serenity-a930877f31fb117ce4b38d4782dd288093700d8a.zip
Kernel: Make quickmap functions VERIFY that MM lock is held
The quickmap_page() and unquickmap_page() functions are used to map a single physical page at a kernel virtual address for temporary access. These use the per-CPU quickmap buffer in the page tables, and access to this is guarded by the MM lock.

To prevent bugs, quickmap_page() should not *take* the MM lock, but rather verify that it is already held!

This exposed two situations where we were using quickmap without holding the MM lock during page fault handling. This patch is forced to fix those issues (which is great!) :^)
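To make the new contract concrete, here is a minimal sketch of what a call site must now look like. It reuses the names visible in the diff below (SpinlockLocker, s_mm_lock, quickmap_page(), unquickmap_page(), PAGE_SIZE); the surrounding helper and the mm() accessor are hypothetical, not actual SerenityOS code:

    // Hypothetical call site: take the MM lock *before* quickmapping and
    // hold it until after unquickmap_page(), or the new VERIFY will panic.
    void copy_from_physical_page(PhysicalAddress const& src, u8* dest)
    {
        SpinlockLocker lock(s_mm_lock);       // caller now owns the MM lock
        u8* mapped = mm().quickmap_page(src); // mm(): hypothetical accessor
        memcpy(dest, mapped, PAGE_SIZE);
        mm().unquickmap_page();               // unmapped under the same lock
    }

Because quickmap_page() no longer takes s_mm_lock itself, a path that forgets the lock now fails loudly at the VERIFY instead of silently acquiring the lock mid-operation, which is how the two page fault handling bugs mentioned above were flushed out.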
Diffstat (limited to 'Kernel/Memory/MemoryManager.cpp')
-rw-r--r--  Kernel/Memory/MemoryManager.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 237e60e6d7..324f9cb083 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -1004,9 +1004,9 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
-    SpinlockLocker lock(s_mm_lock);
 
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 void MemoryManager::unquickmap_page()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    SpinlockLocker lock(s_mm_lock);
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
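For readers unfamiliar with own_lock(): unlike is_locked(), which only says that *someone* holds the lock, own_lock() must report whether the *current* CPU holds it, since s_mm_lock is a recursive spinlock that tracks its owner. A rough, self-contained sketch of that idea follows (not SerenityOS's actual Spinlock implementation; the cpu_id plumbing is simplified):

    #include <atomic>

    class RecursiveSpinlock {
    public:
        void lock(unsigned cpu_id)
        {
            // Re-entry by the owning CPU just bumps the recursion count.
            if (m_owner.load(std::memory_order_acquire) == cpu_id) {
                ++m_recursion;
                return;
            }
            unsigned expected = NO_OWNER;
            // Spin until we install ourselves as the owner.
            while (!m_owner.compare_exchange_weak(expected, cpu_id, std::memory_order_acquire))
                expected = NO_OWNER;
            m_recursion = 1;
        }

        void unlock()
        {
            if (--m_recursion == 0)
                m_owner.store(NO_OWNER, std::memory_order_release);
        }

        // What VERIFY(s_mm_lock.own_lock()) relies on: true only when the
        // *calling* CPU is the recorded owner, not merely when locked.
        bool own_lock(unsigned cpu_id) const
        {
            return m_owner.load(std::memory_order_relaxed) == cpu_id;
        }

    private:
        static constexpr unsigned NO_OWNER = ~0u;
        std::atomic<unsigned> m_owner { NO_OWNER };
        unsigned m_recursion { 0 }; // only touched by the owning CPU
    };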