author     Tom <tomut@yahoo.com>                   2020-10-31 17:19:50 -0600
committer  Andreas Kling <kling@serenityos.org>    2020-11-01 18:48:36 +0100
commit     13aa3d2d62e3fcd21c029cf91c930d09deb148cb (patch)
tree       208ba309ed21d5427704d33da900886221fdc642
parent     8c4a2c34d307d38b1590634d0294f8f1b8d106b9 (diff)
Kernel: Flush TLB when quick-mapping PD/PT that was mapped on other CPU
If a PD/PT was quick-mapped by another CPU we still need to flush the
TLB on the current CPU.

Fixes #3885
-rw-r--r--   Kernel/VM/MemoryManager.cpp   16
-rw-r--r--   Kernel/VM/MemoryManager.h      3
2 files changed, 19 insertions, 0 deletions
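To make the race concrete before the patch body: the sketch below models one shared quickmap PTE and a per-CPU TLB entry with plain integers. All names here (Cpu, g_quickmap_pte, quickmap) are illustrative stand-ins, not SerenityOS APIs; the real logic is in the MemoryManager::quickmap_pd()/quickmap_pt() hunks below.

#include <cassert>
#include <cstdint>

// Toy model: one shared quickmap PTE, one TLB entry per CPU.
struct Cpu {
    uintptr_t tlb_entry = 0;     // what this CPU's TLB caches for the slot
    uintptr_t last_quickmap = 0; // per-CPU record of the last mapping (the fix)
};

uintptr_t g_quickmap_pte = 0;    // shared PTE, like boot_pd3_pt1023[4]

void quickmap(Cpu& cpu, uintptr_t paddr)
{
    if (g_quickmap_pte != paddr) {
        g_quickmap_pte = paddr;  // remap the shared slot
        cpu.tlb_entry = paddr;   // flush_tlb_local(): refresh this CPU only
    } else if (cpu.last_quickmap != paddr) {
        // The PTE already matches, but another CPU installed it; without
        // this branch cpu.tlb_entry could still hold a stale translation.
        cpu.tlb_entry = paddr;   // the flush added by this commit
    }
    cpu.last_quickmap = paddr;
}

int main()
{
    Cpu a, b;
    quickmap(a, 0x1000); // CPU A maps a PD and flushes its own TLB
    quickmap(b, 0x1000); // PTE unchanged; pre-patch, CPU B skipped the flush
    assert(b.tlb_entry == 0x1000); // with the fix, B's TLB is coherent
}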
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 9cebe27a20..fca0b8589a 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -648,6 +648,7 @@ extern "C" PageTableEntry boot_pd3_pt1023[1024];
 PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
 {
     ASSERT(s_mm_lock.own_lock());
+    auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[4];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
     if (pte.physical_page_base() != pd_paddr.as_ptr()) {
@@ -662,13 +663,21 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
         // mapping, it is sufficient to only flush on the current CPU. Other
         // CPUs trying to use this API must wait on the MM lock anyway
         flush_tlb_local(VirtualAddress(0xffe04000));
+    } else {
+        // Even though we don't allow this to be called concurrently, it's
+        // possible that this PD was mapped on a different CPU and we don't
+        // broadcast the flush. If so, we still need to flush the TLB.
+        if (mm_data.m_last_quickmap_pd != pd_paddr)
+            flush_tlb_local(VirtualAddress(0xffe04000));
     }
+    mm_data.m_last_quickmap_pd = pd_paddr;
     return (PageDirectoryEntry*)0xffe04000;
 }
 
 PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 {
     ASSERT(s_mm_lock.own_lock());
+    auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[0];
     if (pte.physical_page_base() != pt_paddr.as_ptr()) {
 #ifdef MM_DEBUG
@@ -682,7 +691,14 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
         // mapping, it is sufficient to only flush on the current CPU. Other
         // CPUs trying to use this API must wait on the MM lock anyway
         flush_tlb_local(VirtualAddress(0xffe00000));
+    } else {
+        // Even though we don't allow this to be called concurrently, it's
+        // possible that this PT was mapped on a different CPU and we don't
+        // broadcast the flush. If so, we still need to flush the TLB.
+        if (mm_data.m_last_quickmap_pt != pt_paddr)
+            flush_tlb_local(VirtualAddress(0xffe00000));
     }
+    mm_data.m_last_quickmap_pt = pt_paddr;
     return (PageTableEntry*)0xffe00000;
 }
 
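For context on why a purely local flush is safe here: callers serialize on s_mm_lock, and the returned pointer is only meaningful while that lock is held, since the next quickmap reuses the same virtual slot. A hedged sketch of an illustrative caller follows; it is not an actual call site from this commit, and the variable names are assumptions.

    // Illustrative only: the quickmapped pointer aliases the shared
    // 0xffe04000 slot, so it must not outlive the s_mm_lock critical section.
    ScopedSpinLock lock(s_mm_lock);
    auto* pd = quickmap_pd(directory, pdpt_index);
    auto pde = pd[page_directory_index]; // copy out what we need
    // 'pd' is stale once the lock is released and another CPU quickmaps.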
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 2bf767a766..24c6fedf70 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -70,6 +70,9 @@ class SynthFSInode;
 struct MemoryManagerData {
     SpinLock<u8> m_quickmap_in_use;
     u32 m_quickmap_prev_flags;
+
+    PhysicalAddress m_last_quickmap_pd;
+    PhysicalAddress m_last_quickmap_pt;
 };
 
 extern RecursiveSpinLock s_mm_lock;
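One detail worth noting: MemoryManagerData is per-CPU state (reached via get_data() in the .cpp hunks above), and the new fields default-construct to the null physical address. Assuming PhysicalAddress value-compares and defaults to 0 (modeled with a toy type here, not the real class), the first quickmap on each CPU always takes the flush path even when another CPU already installed the PTE:

#include <cassert>
#include <cstdint>

// Toy stand-in for PhysicalAddress: defaults to 0 (the null address).
struct PhysAddr {
    uintptr_t value = 0;
    bool operator!=(PhysAddr other) const { return value != other.value; }
};

int main()
{
    PhysAddr last_quickmap_pd;       // fresh CPU: record is still null
    PhysAddr pd_paddr { 0x1234000 }; // any real PD sits at a nonzero page
    // Even if the shared PTE already points at pd_paddr, the per-CPU record
    // is null, so this CPU's first quickmap flushes its own TLB.
    assert(last_quickmap_pd != pd_paddr);
}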