author     Tom <tomut@yahoo.com>                  2020-10-31 19:32:38 -0600
committer  Andreas Kling <kling@serenityos.org>   2020-11-11 12:27:25 +0100
commit     3ee7c21faec5f587887eeb7790724e163b373f98 (patch)
tree       e4724ddb63e2d21af986d3d5059d8b688592054e
parent     5b38132e3cf5f6bedc045c693577515a192db5ab (diff)
Kernel: Implement capturing stack trace on a different CPU
When trying to get a stack trace of a thread on another CPU, we send an SMP message to that processor to capture the stack trace for us.
-rw-r--r--  Kernel/Arch/i386/CPU.cpp  73
-rw-r--r--  Kernel/Arch/i386/CPU.h    20
2 files changed, 86 insertions(+), 7 deletions(-)
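The mechanism is straightforward at a high level: the requesting CPU queues a message for the target CPU, fires an IPI, and spins until the target signals completion by dropping the message's reference count. Below is a minimal user-space sketch of that request/response pattern, with std::thread and std::atomic standing in for processors and IPIs; every name in it is illustrative rather than part of the kernel's API.

#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>

// Illustrative stand-in for one ProcessorMessage: a callback plus a
// reference count the sender spins on until the target has finished.
struct Message {
    std::function<void()> callback;
    std::atomic<unsigned> refs { 0 };
};

int main()
{
    Message msg;
    std::atomic<Message*> mailbox { nullptr }; // plays the role of the per-CPU message queue
    std::atomic<bool> stop { false };

    // "Target CPU": poll the mailbox (the IPI handler analogue), run any
    // queued callback, then drop the reference count to signal completion.
    std::thread target([&] {
        while (!stop.load(std::memory_order_acquire)) {
            if (Message* m = mailbox.exchange(nullptr, std::memory_order_acquire)) {
                m->callback();
                m->refs.fetch_sub(1, std::memory_order_release);
            }
        }
    });

    // "Requesting CPU": publish the message, then spin until refs drops
    // to zero -- the synchronous (async == false) case in this patch.
    unsigned captured_eip = 0;
    msg.callback = [&] { captured_eip = 0xdeadbeef; /* walk the stack here */ };
    msg.refs.store(1, std::memory_order_release);
    mailbox.store(&msg, std::memory_order_release);
    while (msg.refs.load(std::memory_order_acquire) != 0) { /* spin */ }

    std::printf("captured eip: %#x\n", captured_eip);
    stop.store(true, std::memory_order_release);
    target.join();
    return 0;
}

The first hunk below uses this shape via smp_unicast(thread.cpu(), [&]() { ... }, false), holding the scheduler lock so the target's current thread cannot change while the sender waits.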
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index af1fa6e01e..d162a0d0e8 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -1279,8 +1279,9 @@ const DescriptorTablePointer& Processor::get_gdtr()
return m_gdtr;
}
-bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
+bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip, bool from_other_processor)
{
+ bool ret = true;
ScopedCritical critical;
auto& proc = Processor::current();
if (&thread == proc.current_thread()) {
@@ -1288,6 +1289,12 @@ bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
asm volatile("movl %%ebp, %%eax"
: "=g"(frame_ptr));
} else {
+ // If this triggered from another processor, we should never
+ // hit this code path because the other processor is still holding
+ // the scheduler lock, which should prevent us from switching
+ // contexts
+ ASSERT(!from_other_processor);
+
// Since the thread may be running on another processor, there
// is a chance a context switch may happen while we're trying
// to get it. It also won't be entirely accurate and merely
@@ -1295,15 +1302,19 @@ bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
ScopedSpinLock lock(g_scheduler_lock);
if (thread.state() == Thread::Running) {
ASSERT(thread.cpu() != proc.id());
- // TODO: If this is the case, the thread is currently running
+ // If this is the case, the thread is currently running
// on another processor. We can't trust the kernel stack as
// it may be changing at any time. We need to probably send
// an IPI to that processor, have it walk the stack and wait
// until it returns the data back to us
- dbg() << "CPU[" << proc.id() << "] getting stack for "
- << thread << " on other CPU# " << thread.cpu() << " not yet implemented!";
- frame_ptr = eip = 0; // TODO
- return false;
+ smp_unicast(thread.cpu(),
+ [&]() {
+ dbg() << "CPU[" << Processor::current().id() << "] getting stack for cpu #" << proc.id();
+ // NOTE: Because we are holding the scheduler lock while
+ // waiting for this callback to finish, the current thread
+ // on the target processor cannot change
+ ret = get_context_frame_ptr(thread, frame_ptr, eip, true);
+ }, false);
} else {
// We need to retrieve ebp from what was last pushed to the kernel
// stack. Before switching out of that thread, it switch_context
@@ -1903,6 +1914,56 @@ void Processor::smp_broadcast(void (*callback)(), bool async)
smp_broadcast_message(msg, async);
}
+void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
+{
+ auto& cur_proc = Processor::current();
+ ASSERT(cpu != cur_proc.id());
+ auto& target_proc = processors()[cpu];
+ msg.async = async;
+#ifdef SMP_DEBUG
+ dbg() << "SMP[" << cur_proc.id() << "]: Send message " << VirtualAddress(&msg) << " to cpu #" << cpu << " proc: " << VirtualAddress(&target_proc);
+#endif
+ atomic_store(&msg.refs, 1u, AK::MemoryOrder::memory_order_release);
+ if (target_proc->smp_queue_message(msg)) {
+ APIC::the().send_ipi(cpu);
+ }
+
+ if (!async) {
+ // If synchronous then we must cleanup and return the message back
+ // to the pool. Otherwise, the last processor to complete it will return it
+ while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
+ // TODO: pause for a bit?
+
+ // We need to check here if another processor may have requested
+ // us to halt before this message could be delivered. Otherwise
+ // we're just spinning the CPU because msg.refs will never drop to 0.
+ if (cur_proc.m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed))
+ halt_this();
+ }
+
+ smp_cleanup_message(msg);
+ smp_return_to_pool(msg);
+ }
+}
+
+void Processor::smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async)
+{
+ auto& msg = smp_get_from_pool();
+ msg.type = ProcessorMessage::CallbackWithData;
+ msg.callback_with_data.handler = callback;
+ msg.callback_with_data.data = data;
+ msg.callback_with_data.free = free_data;
+ smp_unicast_message(cpu, msg, async);
+}
+
+void Processor::smp_unicast(u32 cpu, void (*callback)(), bool async)
+{
+ auto& msg = smp_get_from_pool();
+ msg.type = ProcessorMessage::Callback;
+ msg.callback.handler = callback;
+ smp_unicast_message(cpu, msg, async);
+}
+
void Processor::smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count)
{
auto& msg = smp_get_from_pool();
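A note on message lifetime in smp_unicast_message above: for a synchronous unicast the sender owns the cleanup and returns the message to the pool once msg.refs drops to zero, while for an asynchronous one the last processor to finish the message returns it. A single-threaded sketch of that ownership split, with hypothetical names loosely modeled on ProcessorMessage (not the kernel's exact API):

#include <atomic>

// Hypothetical message with an intrusive free-list link.
struct Msg {
    std::atomic<unsigned> refs { 0 };
    bool async { false };
    Msg* next { nullptr };
};

static std::atomic<Msg*> s_pool { nullptr };

// Lock-free push back onto the free list.
static void return_to_pool(Msg& msg)
{
    Msg* head = s_pool.load(std::memory_order_acquire);
    do {
        msg.next = head;
    } while (!s_pool.compare_exchange_weak(head, &msg, std::memory_order_release));
}

// Runs on the target CPU after the callback: drop our reference. For an
// async message the last processor to finish owns the cleanup.
static void complete_on_target(Msg& msg)
{
    if (msg.refs.fetch_sub(1, std::memory_order_acq_rel) == 1 && msg.async)
        return_to_pool(msg);
}

// Runs on the sending CPU for a synchronous (async == false) message:
// spin until the target is done, then reclaim the message ourselves.
static void complete_on_sender(Msg& msg)
{
    while (msg.refs.load(std::memory_order_acquire) != 0) {
        // spin; the real loop also checks m_halt_requested so a halt
        // request from another CPU cannot deadlock here
    }
    return_to_pool(msg);
}

int main()
{
    Msg msg;
    msg.refs.store(1, std::memory_order_release);
    complete_on_target(msg); // target finishes first in this toy run
    complete_on_sender(msg); // sender sees refs == 0 and reclaims msg
    return 0;
}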
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 2fbff06fd6..9ae0f3941b 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -739,6 +739,7 @@ class Processor {
static ProcessorMessage& smp_get_from_pool();
static void smp_cleanup_message(ProcessorMessage& msg);
bool smp_queue_message(ProcessorMessage& msg);
+ static void smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async);
static void smp_broadcast_message(ProcessorMessage& msg, bool async);
static void smp_broadcast_halt();
@@ -965,6 +966,23 @@ public:
}
static void smp_broadcast(void (*callback)(), bool async);
static void smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
+ template<typename Callback>
+ static void smp_unicast(u32 cpu, Callback callback, bool async)
+ {
+ auto* data = new Callback(move(callback));
+ smp_unicast(
+ cpu,
+ [](void* data) {
+ (*reinterpret_cast<Callback*>(data))();
+ },
+ data,
+ [](void* data) {
+ delete reinterpret_cast<Callback*>(data);
+ },
+ async);
+ }
+ static void smp_unicast(u32 cpu, void (*callback)(), bool async);
+ static void smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
static void smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count);
template<typename Callback>
@@ -999,7 +1017,7 @@ public:
void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, u32 flags);
u32 init_context(Thread& thread, bool leave_crit);
- static bool get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip);
+ static bool get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip, bool = false);
void set_thread_specific(u8* data, size_t len);
};
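The template overload added to CPU.h exists because a capturing lambda cannot decay to the plain function pointer the message type carries, so the wrapper heap-allocates the callable, ships it through the void* interface via a captureless trampoline lambda, and deletes it afterwards. A self-contained sketch of the same trampoline pattern outside the kernel; unicast and invoke_with_data here are hypothetical stand-ins:

#include <cstdio>
#include <utility>

// Hypothetical C-style transport: only function pointers and a void*
// cross this boundary, mirroring the smp_unicast(u32, void (*)(void*),
// void*, void (*)(void*), bool) overload in the patch.
static void invoke_with_data(void (*callback)(void*), void* data, void (*free_data)(void*))
{
    callback(data);  // would run on the target CPU
    free_data(data); // release the heap-allocated closure afterwards
}

// Template front end: wraps any callable, including capturing lambdas,
// so it can travel through the function-pointer interface above.
template<typename Callback>
static void unicast(Callback callback)
{
    auto* data = new Callback(std::move(callback));
    invoke_with_data(
        [](void* data) {
            (*reinterpret_cast<Callback*>(data))(); // captureless trampoline
        },
        data,
        [](void* data) {
            delete reinterpret_cast<Callback*>(data);
        });
}

int main()
{
    unsigned frame_ptr = 0;
    // The capture of frame_ptr is what a raw function pointer cannot express.
    unicast([&] { frame_ptr = 0x1234; });
    std::printf("frame_ptr = %#x\n", frame_ptr);
    return 0;
}

Heap-allocating the closure, rather than pointing into the sender's stack, is what makes the asynchronous case safe: the message can outlive the caller's frame, and whichever processor finishes it runs the free_data callback.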