path: root/Kernel/FutexQueue.cpp
/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Debug.h>
#include <Kernel/FutexQueue.h>
#include <Kernel/Thread.h>

namespace Kernel {

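// Called by the blocker machinery with m_lock already held when a thread's
// FutexBlocker asks to be added to this queue. A futex queue accepts every
// waiter, so this always returns true.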
bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
{
    VERIFY(data != nullptr); // Thread that is requesting to be blocked
    VERIFY(m_lock.is_locked());
    VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);

    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: should block thread {}", this, *static_cast<Thread*>(data));

    return true;
}

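// Wakes up to wake_count waiters and then moves up to requeue_count of the
// remaining blockers onto the queue returned by get_target_queue(). This
// queue's lock is released before the target queue's lock is acquired, so the
// two spinlocks are never held at the same time. Returns the total number of
// threads woken or requeued.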
u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target)
{
    is_empty_target = false;
    ScopedSpinLock lock(m_lock);

    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue({}, {})", this, wake_count, requeue_count);

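    // First phase: wake up to wake_count waiters while holding m_lock.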
    u32 did_wake = 0, did_requeue = 0;
    do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
        VERIFY(data);
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);

        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue unblocking {}", this, *static_cast<Thread*>(data));
        VERIFY(did_wake < wake_count);
        if (blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    is_empty = is_empty_locked();
    if (requeue_count > 0) {
        auto blockers_to_requeue = do_take_blockers(requeue_count);
        if (!blockers_to_requeue.is_empty()) {
            if (auto* target_futex_queue = get_target_queue()) {
                dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue requeueing {} blockers to {}", this, blockers_to_requeue.size(), target_futex_queue);

                // While still holding m_lock, notify each blocker
                for (auto& info : blockers_to_requeue) {
                    VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
                    auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
                    blocker.begin_requeue();
                }

                lock.unlock();
                did_requeue = blockers_to_requeue.size();

                ScopedSpinLock target_lock(target_futex_queue->m_lock);
                // Now that we hold the target queue's lock, notify each blocker
                // that its move is complete, then append them all to the target queue.
                for (auto& info : blockers_to_requeue) {
                    VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
                    auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
                    blocker.finish_requeue(*target_futex_queue);
                }
                target_futex_queue->do_append_blockers(move(blockers_to_requeue));
                is_empty_target = target_futex_queue->is_empty_locked();
            } else {
                dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue could not get target queue to requeue {} blockers", this, blockers_to_requeue.size());
                do_append_blockers(move(blockers_to_requeue));
            }
        }
    }
    return did_wake + did_requeue;
}

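// Wakes up to wake_count waiters. If a bitset is given, a waiter is only woken
// when Thread::FutexBlocker::unblock_bitset() accepts it for that bitset
// (FUTEX_WAKE_BITSET-style); otherwise any waiter may be woken. Returns the
// number of threads actually woken.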
u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_empty)
{
    if (wake_count == 0)
        return 0; // should we assert instead?
    ScopedSpinLock lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count);
    u32 did_wake = 0;
    do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
        VERIFY(data);
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);

        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n unblocking {}", this, *static_cast<Thread*>(data));
        VERIFY(did_wake < wake_count);
        if (bitset.has_value() ? blocker.unblock_bitset(bitset.value()) : blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    is_empty = is_empty_locked();
    return did_wake;
}

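// Unconditionally wakes every waiter on this queue and returns how many
// threads were woken; is_empty reports whether the queue is empty afterwards.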
u32 FutexQueue::wake_all(bool& is_empty)
{
    ScopedSpinLock lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this);
    u32 did_wake = 0;
    do_unblock([&](Thread::Blocker& b, void* data, bool&) {
        VERIFY(data);
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);
        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all unblocking {}", this, *static_cast<Thread*>(data));
        if (blocker.unblock(true)) {
            did_wake++;
            return true;
        }
        return false;
    });
    is_empty = is_empty_locked();
    return did_wake;
}

}