path: root/Kernel/WaitQueue.cpp
/*
 * Copyright (c) 2020, The SerenityOS developers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <Kernel/Debug.h>
#include <Kernel/Thread.h>
#include <Kernel/WaitQueue.h>

namespace Kernel {

// Called with m_lock held when a thread asks to block on this queue. If a wake
// was requested while nobody was waiting (m_wake_requested), or the queue is in
// non-blocking mode (!m_should_block), the pending wake is consumed and the
// thread is not queued.
bool WaitQueue::should_add_blocker(Thread::Blocker& b, void* data)
{
    ASSERT(data != nullptr); // Thread that is requesting to be blocked
    ASSERT(m_lock.is_locked());
    ASSERT(b.blocker_type() == Thread::Blocker::Type::Queue);
    if (m_wake_requested || !m_should_block) {
        m_wake_requested = false;
        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: do not block thread {}, {}", this, data, m_should_block ? "wake was pending" : "not blocking");
        return false;
    }
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: should block thread {}", this, data);
    return true;
}

// Wakes at most one thread currently blocked on this queue and returns how many
// were woken (0 or 1). If no blocker could be unblocked, the wake is remembered
// in m_wake_requested so the next thread attempting to block is released
// immediately instead.
u32 WaitQueue::wake_one()
{
    u32 did_wake = 0;
    ScopedSpinLock lock(m_lock);
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one", this);
    bool did_unblock_one = do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
        ASSERT(data);
        ASSERT(b.blocker_type() == Thread::Blocker::Type::Queue);
        auto& blocker = static_cast<Thread::QueueBlocker&>(b);
        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one unblocking {}", this, data);
        if (blocker.unblock()) {
            stop_iterating = true;
            did_wake = 1;
            return true;
        }
        return false;
    });
    m_wake_requested = !did_unblock_one;
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one woke {} threads", this, did_wake);
    return did_wake;
}

// Wakes up to wake_count threads blocked on this queue and returns how many were
// actually woken. If no thread could be unblocked, the wake is remembered in
// m_wake_requested.
u32 WaitQueue::wake_n(u32 wake_count)
{
    if (wake_count == 0)
        return 0; // should we assert instead?
    ScopedSpinLock lock(m_lock);
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({})", this, wake_count);
    u32 did_wake = 0;

    bool did_unblock_some = do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
        ASSERT(data);
        ASSERT(b.blocker_type() == Thread::Blocker::Type::Queue);
        auto& blocker = static_cast<Thread::QueueBlocker&>(b);
        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n unblocking {}", this, data);
        ASSERT(did_wake < wake_count);
        if (blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    m_wake_requested = !did_unblock_some;
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({}) woke {} threads", this, wake_count, did_wake);
    return did_wake;
}

// Wakes every thread currently blocked on this queue and returns how many were
// woken. If none could be unblocked, the wake is remembered in m_wake_requested.
u32 WaitQueue::wake_all()
{
    ScopedSpinLock lock(m_lock);

    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all", this);
    u32 did_wake = 0;

    bool did_unblock_any = do_unblock([&](Thread::Blocker& b, void* data, bool&) {
        ASSERT(data);
        ASSERT(b.blocker_type() == Thread::Blocker::Type::Queue);
        auto& blocker = static_cast<Thread::QueueBlocker&>(b);

        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all unblocking {}", this, data);

        if (blocker.unblock()) {
            did_wake++;
            return true;
        }
        return false;
    });
    m_wake_requested = !did_unblock_any;
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all woke {} threads", this, did_wake);
    return did_wake;
}

}
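
// Illustrative usage sketch (not part of the upstream file). It assumes the
// blocking helper declared alongside this class in WaitQueue.h (a wait_on()
// style method built on Thread::QueueBlocker); the exact name and signature of
// that helper, and everything named "example_*" below, are assumptions for
// illustration only.
//
//     static Kernel::WaitQueue s_example_queue;
//
//     // Waiter side: block on the queue until another thread wakes it.
//     void example_wait()
//     {
//         s_example_queue.wait_on(/* timeout */ {}, "ExampleEvent");
//     }
//
//     // Producer side: release one waiter. If no thread is blocked yet, the
//     // wake is remembered (m_wake_requested), so the next thread that calls
//     // the blocking helper is released immediately instead of sleeping.
//     void example_signal()
//     {
//         s_example_queue.wake_one();
//     }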