author     Andreas Kling <kling@serenityos.org>  2020-02-21 13:05:39 +0100
committer  Andreas Kling <kling@serenityos.org>  2020-02-21 15:49:39 +0100
commit     59b9e49bcdad278e50400b3cdb41bc83e744d604 (patch)
tree       b380db540d67500cd1beaf5e5bbbdd981360cf34 /Kernel/VM/MemoryManager.cpp
parent     f9a138aa4b9d96f59d20219179038805b0c8755e (diff)
download   serenity-59b9e49bcdad278e50400b3cdb41bc83e744d604.zip
Kernel: Don't trigger page faults during profiling stack walk
The kernel sampling profiler will walk thread stacks during the timer tick handler. Since it's not safe to trigger page faults during IRQs, we now avoid this by checking the page tables manually before accessing each stack location.
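
For illustration, here is how a profiler's stack walker might consume the new helper. This is a hedged sketch, not the commit's actual caller (which lives outside this file): the walk_stack_safely name and loop shape are hypothetical, while MM, VirtualAddress, Vector, and can_read_without_faulting are the kernel's own.

// Hypothetical sketch: walking an EBP-chained x86 stack from IRQ
// context without risking a page fault. Only the helper
// MM.can_read_without_faulting() is from this commit; the walker
// itself is illustrative.
Vector<uintptr_t> walk_stack_safely(Process& process, uintptr_t ebp)
{
    Vector<uintptr_t> backtrace;
    for (auto* stack_ptr = (uintptr_t*)ebp; stack_ptr != nullptr; stack_ptr = (uintptr_t*)stack_ptr[0]) {
        // Check that both the saved EBP and the return address slot
        // are mapped before dereferencing; faulting inside the timer
        // tick handler would not be safe.
        if (!MM.can_read_without_faulting(process, VirtualAddress((uintptr_t)stack_ptr), sizeof(uintptr_t) * 2))
            break;
        backtrace.append(stack_ptr[1]); // saved return address
    }
    return backtrace;
}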
Diffstat (limited to 'Kernel/VM/MemoryManager.cpp')
-rw-r--r--  Kernel/VM/MemoryManager.cpp  26
1 file changed, 26 insertions, 0 deletions
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 1e71e3172e..c9a1846733 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -191,6 +191,21 @@ void MemoryManager::parse_memory_map()
m_user_physical_pages += region.finalize_capacity();
}
+const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+{
+ ASSERT_INTERRUPTS_DISABLED();
+ u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+ u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
+ u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
+
+ auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
+ const PageDirectoryEntry& pde = pd[page_directory_index];
+ if (!pde.is_present())
+ return nullptr;
+
+ return &quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
+}
+
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
ASSERT_INTERRUPTS_DISABLED();
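
The three shifts in pte() above decode an x86 PAE virtual address: bits 31:30 select one of 4 page directory pointer table entries, bits 29:21 one of 512 page directory entries, and bits 20:12 one of 512 page table entries (the low 12 bits are the offset within the 4 KiB page). A worked example for an illustrative address:

// Worked example (illustrative address), mirroring the masks above:
u32 vaddr = 0xC0123456;
u32 page_directory_table_index = (vaddr >> 30) & 0x3;   // == 0x3
u32 page_directory_index       = (vaddr >> 21) & 0x1ff; // == 0x0
u32 page_table_index           = (vaddr >> 12) & 0x1ff; // == 0x123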
@@ -611,6 +626,17 @@ bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress
return validate_range<AccessSpace::Kernel, AccessType::Read>(process, vaddr, size);
}
+bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAddress vaddr, size_t size) const
+{
+ // FIXME: Use the size argument!
+ UNUSED_PARAM(size);
+ auto* pte = const_cast<MemoryManager*>(this)->pte(process.page_directory(), vaddr);
+ if (!pte)
+ return false;
+ return pte->is_present();
+}
+
+
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr, size_t size) const
{
if (!is_user_address(vaddr))
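
As the FIXME in can_read_without_faulting() notes, the size argument is currently ignored, so only the first page of the range is actually checked. A size-aware variant would have to test every page the range touches, since [vaddr, vaddr + size) can cross page boundaries. A hedged sketch follows; the name and free-function shape are hypothetical (and assume pte() is reachable from here), while pte(), PAGE_SIZE, and Process::page_directory() are the kernel's own.

// Hypothetical sketch of a size-aware check, not part of this
// commit: verify a PTE is present for every page in the range.
// Interrupts must be disabled, since pte() asserts that.
bool can_read_range_without_faulting(const Process& process, VirtualAddress vaddr, size_t size)
{
    if (size == 0)
        return true;
    uintptr_t first_page = vaddr.get() & ~(uintptr_t)(PAGE_SIZE - 1);
    uintptr_t last_page = (vaddr.get() + size - 1) & ~(uintptr_t)(PAGE_SIZE - 1);
    for (uintptr_t page = first_page; page <= last_page; page += PAGE_SIZE) {
        auto* pte = MM.pte(process.page_directory(), VirtualAddress(page));
        if (!pte || !pte->is_present())
            return false;
    }
    return true;
}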