summaryrefslogtreecommitdiff
path: root/Kernel/Thread.h
diff options
context:
space:
mode:
author    Andreas Kling <kling@serenityos.org>  2021-08-15 12:38:02 +0200
committer Andreas Kling <kling@serenityos.org>  2021-08-15 12:44:35 +0200
commit    37304203dd66ac3c724f963cf2c041e586da3524 (patch)
tree      3a6b188b1e51cfab33ae1866d52b101cf1792fcb /Kernel/Thread.h
parent    90c7307c6c65da9d6e0c4d5987f842c6a8d7ee8c (diff)
download  serenity-37304203dd66ac3c724f963cf2c041e586da3524.zip
Kernel: Lock thread list while in Thread::unref()
This patch does three things:

- Convert the global thread list from a HashMap to an IntrusiveList
- Combine the thread list and its lock into a SpinLockProtectedValue
- Customize Thread::unref() so it locks the list while unreffing

This closes the same race window for Thread as @sin-ack's recent changes
did for Process.

Note that the HashMap->IntrusiveList conversion means that we lose O(1)
lookups, but the majority of clients of this list are doing traversal,
not lookup. Once we have an intrusive hashing solution, we should port
this to use that, but for now, this gets rid of heap allocations during
a sensitive time.
Diffstat (limited to 'Kernel/Thread.h')
-rw-r--r--  Kernel/Thread.h | 60
1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 4d8989deb3..0f35f917d1 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -26,6 +26,7 @@
#include <Kernel/KString.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h>
+#include <Kernel/Locking/SpinLockProtectedValue.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Scheduler.h>
#include <Kernel/TimerQueue.h>
@@ -130,7 +131,7 @@ struct ThreadRegisters {
};
class Thread
- : public RefCounted<Thread>
+ : public RefCountedBase
, public Weakable<Thread> {
AK_MAKE_NONCOPYABLE(Thread);
AK_MAKE_NONMOVABLE(Thread);
@@ -141,10 +142,9 @@ class Thread
friend class Scheduler;
friend struct ThreadReadyQueue;
- static SpinLock<u8> g_tid_map_lock;
- static HashMap<ThreadID, Thread*>* g_tid_map;
-
public:
+ bool unref() const;
+
inline static Thread* current()
{
return Processor::current_thread();
@@ -1372,8 +1372,14 @@ private:
void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
void drop_thread_count(bool);
+ mutable IntrusiveListNode<Thread> m_global_thread_list_node;
+
public:
using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
+ using GlobalList = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_global_thread_list_node>;
+
+private:
+ static SpinLockProtectedValue<GlobalList>& all_threads();
};
AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
@@ -1381,37 +1387,41 @@ AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
- ScopedSpinLock lock(g_tid_map_lock);
- for (auto& it : *g_tid_map) {
- IterationDecision decision = callback(*it.value);
- if (decision != IterationDecision::Continue)
- return decision;
- }
- return IterationDecision::Continue;
+ return all_threads().with([&](auto& list) -> IterationDecision {
+ for (auto& thread : list) {
+ IterationDecision decision = callback(thread);
+ if (decision != IterationDecision::Continue)
+ return decision;
+ }
+ return IterationDecision::Continue;
+ });
}
template<IteratorFunction<Thread&> Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
- ScopedSpinLock lock(g_tid_map_lock);
- for (auto& it : *g_tid_map) {
- auto& thread = *it.value;
- if (thread.state() != state)
- continue;
- IterationDecision decision = callback(thread);
- if (decision != IterationDecision::Continue)
- return decision;
- }
- return IterationDecision::Continue;
+ return all_threads().with([&](auto& list) -> IterationDecision {
+ for (auto& thread : list) {
+ if (thread.state() != state)
+ continue;
+ IterationDecision decision = callback(thread);
+ if (decision != IterationDecision::Continue)
+ return decision;
+ }
+ return IterationDecision::Continue;
+ });
}
template<VoidFunction<Thread&> Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
- ScopedSpinLock lock(g_tid_map_lock);
- for (auto& it : *g_tid_map)
- callback(*it.value);
- return IterationDecision::Continue;
+ return all_threads().with([&](auto& list) {
+ for (auto& thread : list) {
+ if (callback(thread) == IterationDecision::Break)
+ return IterationDecision::Break;
+ }
+ return IterationDecision::Continue;
+ });
}
template<VoidFunction<Thread&> Callback>