/*
* Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#include <AK/Vector.h>
#include <LibCore/DeferredInvocationContext.h>
#include <LibCore/Object.h>
#include <LibCore/Promise.h>
#include <LibCore/ThreadEventQueue.h>
#include <LibThreading/Mutex.h>

namespace Core {
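
// All of the queue's mutable state lives behind this Private struct.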
struct ThreadEventQueue::Private {
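    // An event paired with a weak pointer to its receiver, so that events
    // whose receiver has already died can be detected at dispatch time.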
    struct QueuedEvent {
        AK_MAKE_NONCOPYABLE(QueuedEvent);

    public:
        QueuedEvent(Object& receiver, NonnullOwnPtr<Event> event)
            : receiver(receiver)
            , event(move(event))
        {
        }

        QueuedEvent(QueuedEvent&& other)
            : receiver(other.receiver)
            , event(move(other.event))
        {
        }

        ~QueuedEvent() = default;

        WeakPtr<Object> receiver;
        NonnullOwnPtr<Event> event;
    };
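
    // The mutex guards both containers below, since events and jobs may be
    // posted from threads other than the one that owns this queue.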
    Threading::Mutex mutex;
    Vector<QueuedEvent, 128> queued_events;
    Vector<NonnullRefPtr<Promise<NonnullRefPtr<Object>>>, 16> pending_promises;
    bool warned_promise_count { false };
};
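
// One queue per thread, created lazily by current(). Note that queues are
// currently leaked (see the FIXME below).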
static thread_local ThreadEventQueue* s_current_thread_event_queue;

ThreadEventQueue& ThreadEventQueue::current()
{
    if (!s_current_thread_event_queue) {
        // FIXME: Don't leak these.
        s_current_thread_event_queue = new ThreadEventQueue;
    }
    return *s_current_thread_event_queue;
}

ThreadEventQueue::ThreadEventQueue()
    : m_private(make<Private>())
{
}

ThreadEventQueue::~ThreadEventQueue() = default;
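
// Queues an event for later dispatch to `receiver`. Safe to call from any
// thread; the event is delivered during a subsequent call to process().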
void ThreadEventQueue::post_event(Core::Object& receiver, NonnullOwnPtr<Core::Event> event)
{
    Threading::MutexLocker lock(m_private->mutex);
    m_private->queued_events.empend(receiver, move(event));
}
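
// Tracks a promise-backed job, keeping it alive until it resolves or is
// canceled; completed jobs are pruned on each call to process().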
void ThreadEventQueue::add_job(NonnullRefPtr<Promise<NonnullRefPtr<Object>>> promise)
{
    Threading::MutexLocker lock(m_private->mutex);
    m_private->pending_promises.append(move(promise));
}
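
// Dispatches every event that was queued at the time of the call, and returns
// the number of events processed.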
size_t ThreadEventQueue::process()
{
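    // Move the pending events out while holding the mutex, then dispatch them
    // without the lock, so that handlers are free to post further events
    // (those will land in the next batch).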
    decltype(m_private->queued_events) events;
    {
        Threading::MutexLocker locker(m_private->mutex);
        events = move(m_private->queued_events);
        m_private->pending_promises.remove_all_matching([](auto& job) { return job->is_resolved() || job->is_canceled(); });
    }

    size_t processed_events = 0;
    for (size_t i = 0; i < events.size(); ++i) {
        auto& queued_event = events.at(i);
        auto receiver = queued_event.receiver.strong_ref();
        auto& event = *queued_event.event;
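        // The receiver is only held weakly, so it may have died since the
        // event was posted; such events are logged and dropped.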
        if (!receiver) {
            switch (event.type()) {
            case Event::Quit:
                VERIFY_NOT_REACHED();
            default:
                dbgln("ThreadEventQueue::process: Event of type {} with no receiver", event.type());
                break;
            }
        } else if (event.type() == Event::Type::DeferredInvoke) {
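            // Deferred invocations carry a callable rather than targeting an
            // event handler, so run it directly.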
            static_cast<DeferredInvocationEvent&>(event).m_invokee();
        } else {
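            // Keep a strong reference to the receiver for the duration of
            // dispatch, in case a handler drops the last other reference.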
            NonnullRefPtr<Object> protector(*receiver);
            receiver->dispatch_event(event);
        }
        ++processed_events;
    }
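
    // Complain (once) if the number of in-flight promise jobs grows beyond
    // what this queue was designed for.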
    {
        Threading::MutexLocker locker(m_private->mutex);
        if (m_private->pending_promises.size() > 30 && !m_private->warned_promise_count) {
            m_private->warned_promise_count = true;
            dbgln("ThreadEventQueue::process: Job queue wasn't designed for this load ({} promises)", m_private->pending_promises.size());
        }
    }
    return processed_events;
}
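
// Returns true if any events are waiting; pending promise jobs are not considered.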
bool ThreadEventQueue::has_pending_events() const
{
    Threading::MutexLocker locker(m_private->mutex);
    return !m_private->queued_events.is_empty();
}

}