| author | Andreas Kling <kling@serenityos.org> | 2021-12-19 16:38:46 +0100 |
|---|---|---|
| committer | Andreas Kling <kling@serenityos.org> | 2021-12-19 18:18:38 +0100 |
| commit | 8ef9b003adb7922d9154793b745b1127f418b483 (patch) | |
| tree | 4896be30da16357a3c17d240f42c75493b2eea48 | |
| parent | d82c91ac2c83b68d9ac41b1d32436a2eb9535ab6 (diff) | |
| download | serenity-8ef9b003adb7922d9154793b745b1127f418b483.zip | |
Kernel: Stop perf event stack walk on bogus userspace->kernel traversal
When walking the stack to generate a perf_event sample, we now check
if a userspace stack frame points back into kernel memory.
It was possible to use this as an arbitrary kernel memory read. :^)
-rw-r--r-- | Kernel/PerformanceEventBuffer.cpp | 11
1 file changed, 11 insertions, 0 deletions
```diff
diff --git a/Kernel/PerformanceEventBuffer.cpp b/Kernel/PerformanceEventBuffer.cpp
index 14ec6498cc..906c9c85e4 100644
--- a/Kernel/PerformanceEventBuffer.cpp
+++ b/Kernel/PerformanceEventBuffer.cpp
@@ -43,10 +43,21 @@ static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(Fl
     FlatPtr stack_ptr = bp;
     // FIXME: Figure out how to remove this SmapDisabler without breaking profile stacks.
     SmapDisabler disabler;
+    // NOTE: The stack should always have kernel frames first, followed by userspace frames.
+    // If a userspace frame points back into kernel memory, something is afoot.
+    bool is_walking_userspace_stack = false;
     while (stack_ptr) {
         void* fault_at;
         if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
             break;
+        if (!Memory::is_user_address(VirtualAddress { stack_ptr })) {
+            if (is_walking_userspace_stack) {
+                dbgln("SHENANIGANS! Userspace stack points back into kernel memory");
+                break;
+            }
+        } else {
+            is_walking_userspace_stack = true;
+        }
         FlatPtr retaddr;
         if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
             break;
```
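For illustration, here is a minimal userspace sketch of the same guard logic, not SerenityOS code: the `kernel_boundary` constant, the `Frame` struct, and `walk_stack()` are hypothetical stand-ins for the kernel's `Memory::is_user_address()`, `safe_memcpy()`, and `raw_backtrace()`.

```cpp
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical user/kernel address split, for illustration only; real layouts
// differ per architecture and the kernel uses Memory::is_user_address() instead.
constexpr uintptr_t kernel_boundary = 0xffff'8000'0000'0000ULL;

static bool is_user_address(uintptr_t address)
{
    return address < kernel_boundary;
}

struct Frame {
    Frame* previous;          // saved frame pointer
    uintptr_t return_address; // saved return address
};

static std::vector<uintptr_t> walk_stack(Frame* frame, std::size_t max_frames)
{
    std::vector<uintptr_t> backtrace;
    // Kernel frames are expected first, then userspace frames. Once the walk has
    // entered userspace, a frame pointer leading back into kernel memory is refused.
    bool is_walking_userspace_stack = false;
    while (frame && backtrace.size() < max_frames) {
        auto frame_address = reinterpret_cast<uintptr_t>(frame);
        if (!is_user_address(frame_address)) {
            if (is_walking_userspace_stack) {
                std::fprintf(stderr, "Userspace frame points back into kernel memory, stopping walk\n");
                break;
            }
        } else {
            is_walking_userspace_stack = true;
        }
        if (frame->return_address == 0)
            break;
        backtrace.push_back(frame->return_address);
        frame = frame->previous;
    }
    return backtrace;
}

int main()
{
    // Fake frames in userspace memory; a real walker would read them with safe_memcpy().
    Frame inner { nullptr, 0 };
    Frame middle { &inner, 0x2000 };
    Frame outer { &middle, 0x1000 };
    for (auto return_address : walk_stack(&outer, 16))
        std::printf("return address: %#" PRIxPTR "\n", return_address);
    return 0;
}
```

The key point is the one-way flag: once the walk has seen a userspace frame, any frame pointer that points back above the user/kernel boundary is treated as attacker-controlled, so the walk stops instead of following it and leaking kernel memory into the profile.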