author     Tom <tomut@yahoo.com>                    2020-07-03 12:12:34 -0600
committer  Andreas Kling <kling@serenityos.org>     2020-07-03 21:16:56 +0200
commit     bb84fad0bf3617ab78d027347e1c24ab1bf555a7 (patch)
tree       f6dd5ef9ee2e6609e0e45734bfdda3a9737c9004
parent     6d5bd8c76b1eec679ec6ca8201eef6ec5ff21a9e (diff)
download   serenity-bb84fad0bf3617ab78d027347e1c24ab1bf555a7.zip
Kernel: Fix retrieving frame pointer from a thread

If we're trying to walk the stack for another thread, we can no longer
retrieve the EBP register from Thread::m_tss. Instead, we need to look at
the top of the kernel stack, because any thread that is not currently
running was last executing in kernel mode. Context switches now always
trigger a brief switch to kernel mode, and Thread::m_tss is only used to
save ESP and EIP.

Fixes #2678
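Why the saved EBP sits at the very top of the kernel stack: switch_context
pushes the callee-saved registers before the stack pointer is saved, with
EBP pushed last. The exact push sequence is not shown in this patch, so the
ordering below is an assumption; this is a minimal host-side sketch on a
fake stack, not kernel code:

    // Sketch: simulate the callee-saved pushes of a context switch and
    // show that the saved EBP is recoverable at the saved stack top.
    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t stack[8];
        uint32_t* sp = stack + 8;  // x86 stacks grow toward lower addresses
        *--sp = 0x11111111;        // push ebx (callee-saved)
        *--sp = 0x22222222;        // push esi (callee-saved)
        *--sp = 0x33333333;        // push edi (callee-saved)
        *--sp = 0xdeadbeef;        // push ebp (callee-saved, pushed last)
        // Here switch_context would store sp into Thread::m_tss.esp.
        uint32_t* stack_top = sp;  // what tss.esp points at
        assert(stack_top[0] == 0xdeadbeef); // frame_ptr = stack_top[0]
    }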
-rw-r--r--  Kernel/Arch/i386/CPU.cpp  39
-rw-r--r--  Kernel/Arch/i386/CPU.h     1
-rw-r--r--  Kernel/Thread.cpp         50
-rw-r--r--  Kernel/Thread.h            5
4 files changed, 67 insertions, 28 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 777c1df580..686da78fd5 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -960,6 +960,45 @@ const DescriptorTablePointer& Processor::get_gdtr()
return m_gdtr;
}
+bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
+{
+ ScopedCritical critical;
+ auto& proc = Processor::current();
+ if (&thread == proc.current_thread()) {
+ ASSERT(thread.state() == Thread::Running);
+ asm volatile("movl %%ebp, %%eax"
+ : "=a"(frame_ptr));
+ } else {
+ // Since the thread may be running on another processor, there
+ // is a chance a context switch may happen while we're trying
+ // to get it. The result also won't be entirely accurate; it
+ // merely reflects the state at the last context switch.
+ ScopedSpinLock lock(g_scheduler_lock);
+ if (thread.state() == Thread::Running) {
+ ASSERT(thread.cpu() != proc.id());
+ // TODO: In this case the thread is currently running on another
+ // processor. We can't trust the kernel stack as it may change at
+ // any time. We probably need to send an ICI to that processor,
+ // have it walk the stack itself, and wait until it returns the
+ // data back to us.
+ dbg() << "CPU[" << proc.id() << "] getting stack for "
+ << thread << " on other CPU# " << thread.cpu() << " not yet implemented!";
+ frame_ptr = eip = 0; // TODO
+ return false;
+ } else {
+ // We need to retrieve ebp from what was last pushed to the kernel
+ // stack. Before switching out of that thread, switch_context
+ // pushed the callee-saved registers, and the last of them happens
+ // to be ebp.
+ auto& tss = thread.tss();
+ u32* stack_top = reinterpret_cast<u32*>(tss.esp);
+ frame_ptr = stack_top[0];
+ eip = tss.eip;
+ }
+ }
+ return true;
+}
+
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
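To make the new helper's contract concrete, here is a hedged usage sketch.
dump_thread_start_frame is a hypothetical caller, not part of this patch;
only Processor::get_context_frame_ptr comes from the change above, and the
snippet assumes a kernel context where Thread and dbg() are available:

    // Hypothetical caller (illustration only).
    void dump_thread_start_frame(Thread& thread)
    {
        u32 frame_ptr = 0;
        u32 eip = 0;
        if (!Processor::get_context_frame_ptr(thread, frame_ptr, eip)) {
            // The thread is running on another CPU; its kernel stack is
            // live and can't be trusted (see the ICI TODO above).
            return;
        }
        // For a non-current thread, eip comes from Thread::m_tss and
        // frame_ptr from the top of its kernel stack.
        dbg() << "thread " << thread << ": eip=" << eip << " ebp=" << frame_ptr;
    }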
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index dfb1b9c52a..f11988b88d 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -775,6 +775,7 @@ public:
void switch_context(Thread* from_thread, Thread* to_thread);
[[noreturn]] static void assume_context(Thread& thread, u32 flags);
u32 init_context(Thread& thread, bool leave_crit);
+ static bool get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip);
void set_thread_specific(u8* data, size_t len);
};
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 532346f472..cfbe113783 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -739,7 +739,7 @@ void Thread::notify_finalizer()
g_finalizer_wait_queue->wake_all();
}
-String Thread::backtrace(ProcessInspectionHandle&) const
+String Thread::backtrace(ProcessInspectionHandle&)
{
return backtrace_impl();
}
@@ -775,37 +775,37 @@ static bool symbolicate(const RecognizedSymbol& symbol, const Process& process,
return true;
}
-String Thread::backtrace_impl() const
+String Thread::backtrace_impl()
{
Vector<RecognizedSymbol, 128> recognized_symbols;
- u32 start_frame;
- if (Thread::current() == this) {
- asm volatile("movl %%ebp, %%eax"
- : "=a"(start_frame));
- } else {
- start_frame = frame_ptr();
- recognized_symbols.append({ tss().eip, symbolicate_kernel_address(tss().eip) });
- }
-
auto& process = const_cast<Process&>(this->process());
auto elf_bundle = process.elf_bundle();
ProcessPagingScope paging_scope(process);
- FlatPtr stack_ptr = start_frame;
- for (;;) {
- if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
- break;
- FlatPtr retaddr;
-
- if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
- copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]);
- recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
- copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr);
- } else {
- memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr));
- recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
- memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr));
+ // To prevent a context switch involving this thread, which may
+ // happen on another processor, we need to hold the scheduler lock
+ // while walking the stack.
+ {
+ ScopedSpinLock lock(g_scheduler_lock);
+ FlatPtr stack_ptr, eip;
+ if (Processor::get_context_frame_ptr(*this, stack_ptr, eip)) {
+ recognized_symbols.append({ eip, symbolicate_kernel_address(eip) });
+ for (;;) {
+ if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
+ break;
+ FlatPtr retaddr;
+
+ if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
+ copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]);
+ recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
+ copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr);
+ } else {
+ memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr));
+ recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
+ memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr));
+ }
+ }
}
}
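The loop above walks a conventional x86 frame-pointer chain: at each step,
[ebp] holds the caller's saved EBP and [ebp+4] the return address, which is
why two pointer-sized words are validated per frame. Below is a minimal
host-side sketch of the same walk over a fabricated chain; the range checks
and copy_from_user are omitted, and all names are illustrative:

    #include <cstdint>
    #include <cstdio>

    using FlatPtr = uintptr_t; // pointer-sized word, as in the kernel

    int main()
    {
        // Fabricated frames; each one is { saved ebp, return address }.
        FlatPtr frame1[2] = { 0, 0x2222 }; // saved ebp of 0 ends the chain
        FlatPtr frame0[2] = { reinterpret_cast<FlatPtr>(frame1), 0x1111 };

        FlatPtr stack_ptr = reinterpret_cast<FlatPtr>(frame0);
        while (stack_ptr) {
            FlatPtr retaddr = reinterpret_cast<FlatPtr*>(stack_ptr)[1];
            std::printf("return address: %#zx\n", static_cast<size_t>(retaddr));
            stack_ptr = reinterpret_cast<FlatPtr*>(stack_ptr)[0]; // follow chain
        }
    }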
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 5bac56e195..6203f2cdf4 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -104,7 +104,7 @@ public:
Process& process() { return m_process; }
const Process& process() const { return m_process; }
- String backtrace(ProcessInspectionHandle&) const;
+ String backtrace(ProcessInspectionHandle&);
Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;
const String& name() const { return m_name; }
@@ -283,7 +283,6 @@ public:
u32 affinity() const { return m_cpu_affinity; }
void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }
- u32 frame_ptr() const { return m_tss.ebp; }
u32 stack_ptr() const { return m_tss.esp; }
RegisterState& get_register_dump_from_stack();
@@ -465,7 +464,7 @@ private:
friend class WaitQueue;
bool unlock_process_if_locked(u32& prev_crit);
void relock_process(bool did_unlock, u32 prev_crit);
- String backtrace_impl() const;
+ String backtrace_impl();
void reset_fpu_state();
Process& m_process;