From 37304203dd66ac3c724f963cf2c041e586da3524 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Sun, 15 Aug 2021 12:38:02 +0200
Subject: Kernel: Lock thread list while in Thread::unref()

This patch does three things:

- Convert the global thread list from a HashMap to an IntrusiveList
- Combine the thread list and its lock into a SpinLockProtectedValue
- Customize Thread::unref() so it locks the list while unreffing

This closes the same race window for Thread as @sin-ack's recent changes
did for Process.

Note that the HashMap->IntrusiveList conversion means that we lose O(1)
lookups, but the majority of clients of this list are doing traversal,
not lookup. Once we have an intrusive hashing solution, we should port
this to use that, but for now, this gets rid of heap allocations during
a sensitive time.
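
For illustration, here is roughly how a client uses the combined
lock+list (a simplified sketch, not code from this patch: dump_all_tids()
is a made-up function, and the snippet is assumed to live inside the
Kernel namespace like the code it mirrors):

    #include <AK/Singleton.h>
    #include <Kernel/Thread.h>

    namespace Kernel {

    // SpinLockProtectedValue<T> couples a value with the SpinLock that
    // guards it; the only way to reach the value is through with(),
    // which holds the lock for the duration of the callback.
    static Singleton<SpinLockProtectedValue<Thread::GlobalList>> s_list;

    static void dump_all_tids()
    {
        s_list->with([&](auto& list) {
            // The list lock is held here, so Thread::unref() cannot
            // unlink (and delete) a thread while we walk the list.
            for (auto& thread : list)
                dbgln("tid={}", thread.tid().value());
        });
    }

    }

Because unref() now takes the same lock before it drops the last
reference, a thread found during such a traversal is guaranteed to still
be alive; this is why from_tid() no longer needs a try_ref() dance.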
---
 Kernel/Scheduler.cpp |  1 +
 Kernel/Thread.cpp    | 53 ++++++++++++++++++++++------------------
 Kernel/Thread.h      | 60 ++++++++++++++++++++++++++++++----------------------
 Kernel/init.cpp      |  1 -
 4 files changed, 61 insertions(+), 54 deletions(-)

diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index f53acb27a4..426ba3546f 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -605,6 +605,7 @@ void dump_thread_list(bool with_stack_traces)
         }
         if (with_stack_traces)
             dbgln("{}", thread.backtrace());
+        return IterationDecision::Continue;
     });
 }
 
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index b1730c1398..59389476b1 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -5,6 +5,7 @@
  */
 
 #include <AK/ScopeGuard.h>
+#include <AK/Singleton.h>
 #include <AK/StringBuilder.h>
 #include <AK/Time.h>
 #include <Kernel/Arch/x86/SmapDisabler.h>
@@ -29,12 +30,22 @@
 
 namespace Kernel {
 
-SpinLock<u8> Thread::g_tid_map_lock;
-READONLY_AFTER_INIT HashMap<ThreadID, Thread*>* Thread::g_tid_map;
+static Singleton<SpinLockProtectedValue<Thread::GlobalList>> s_list;
 
-UNMAP_AFTER_INIT void Thread::initialize()
+SpinLockProtectedValue<Thread::GlobalList>& Thread::all_threads()
 {
-    g_tid_map = new HashMap<ThreadID, Thread*>();
+    return *s_list;
+}
+
+bool Thread::unref() const
+{
+    return all_threads().with([&](auto&) {
+        if (deref_base())
+            return false;
+        m_global_thread_list_node.remove();
+        delete this;
+        return true;
+    });
 }
 
 KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
@@ -77,11 +88,10 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> ker
         m_kernel_stack_region->set_name(KString::try_create(string));
     }
 
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        auto result = g_tid_map->set(m_tid, this);
-        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
-    }
+    all_threads().with([&](auto& list) {
+        list.append(*this);
+    });
+
     if constexpr (THREAD_DEBUG)
         dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
 
@@ -162,11 +172,6 @@ Thread::~Thread()
         // We shouldn't be queued
         VERIFY(m_runnable_priority < 0);
     }
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        auto result = g_tid_map->remove(m_tid);
-        VERIFY(result);
-    }
 }
 
 void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock, u32 lock_count)
@@ -1250,21 +1255,13 @@ KResult Thread::make_thread_specific_region(Badge<Process>)
 
 RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
-    RefPtr<Thread> found_thread;
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        if (auto it = g_tid_map->find(tid); it != g_tid_map->end()) {
-            // We need to call try_ref() here as there is a window between
-            // dropping the last reference and calling the Thread's destructor!
-            // We shouldn't remove the threads from that list until it is truly
-            // destructed as it may stick around past finalization in order to
-            // be able to wait() on it!
-            if (it->value->try_ref()) {
-                found_thread = adopt_ref(*it->value);
-            }
+    return all_threads().with([&](auto& list) -> RefPtr<Thread> {
+        for (Thread& thread : list) {
+            if (thread.tid() == tid)
+                return thread;
         }
-    }
-    return found_thread;
+        return nullptr;
+    });
 }
 
 void Thread::reset_fpu_state()
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 4d8989deb3..0f35f917d1 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -26,6 +26,7 @@
 #include <Kernel/KResult.h>
 #include <Kernel/Locking/LockLocation.h>
 #include <Kernel/Locking/LockMode.h>
+#include <Kernel/Locking/SpinLockProtectedValue.h>
 #include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Scheduler.h>
 #include <Kernel/TimerQueue.h>
@@ -130,7 +131,7 @@ struct ThreadRegisters {
 };
 
 class Thread
-    : public RefCounted<Thread>
+    : public RefCountedBase
     , public Weakable<Thread> {
     AK_MAKE_NONCOPYABLE(Thread);
     AK_MAKE_NONMOVABLE(Thread);
@@ -141,10 +142,9 @@ class Thread
     friend class Scheduler;
     friend struct ThreadReadyQueue;
 
-    static SpinLock<u8> g_tid_map_lock;
-    static HashMap<ThreadID, Thread*>* g_tid_map;
-
 public:
+    bool unref() const;
+
     inline static Thread* current()
     {
         return Processor::current_thread();
@@ -1372,8 +1372,14 @@ private:
     void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
     void drop_thread_count(bool);
 
+    mutable IntrusiveListNode<Thread> m_global_thread_list_node;
+
 public:
     using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
+    using GlobalList = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_global_thread_list_node>;
+
+private:
+    static SpinLockProtectedValue<GlobalList>& all_threads();
 };
 
 AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
@@ -1381,37 +1387,41 @@ AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
 template<IteratorFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map) {
-        IterationDecision decision = callback(*it.value);
-        if (decision != IterationDecision::Continue)
-            return decision;
-    }
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) -> IterationDecision {
+        for (auto& thread : list) {
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+        return IterationDecision::Continue;
+    });
 }
 
 template<IteratorFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map) {
-        auto& thread = *it.value;
-        if (thread.state() != state)
-            continue;
-        IterationDecision decision = callback(thread);
-        if (decision != IterationDecision::Continue)
-            return decision;
-    }
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) -> IterationDecision {
+        for (auto& thread : list) {
+            if (thread.state() != state)
+                continue;
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+        return IterationDecision::Continue;
+    });
 }
 
 template<VoidFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map)
-        callback(*it.value);
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) {
+        for (auto& thread : list) {
+            if (callback(thread) == IterationDecision::Break)
+                return IterationDecision::Break;
+        }
+        return IterationDecision::Continue;
+    });
 }
 
 template<VoidFunction<Thread&> Callback>
diff --git a/Kernel/init.cpp b/Kernel/init.cpp
index bd275461d6..61c5fec4ab 100644
--- a/Kernel/init.cpp
+++ b/Kernel/init.cpp
@@ -207,7 +207,6 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     __stack_chk_guard = get_fast_random<u32>();
 
     ProcFSComponentRegistry::initialize();
-    Thread::initialize();
     Process::initialize();
     Scheduler::initialize();
 
-- 
cgit v1.2.3