path: root/Kernel/TimerQueue.h
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/AtomicRefCounted.h>
#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/OwnPtr.h>
#include <AK/Time.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Time/TimeManagement.h>

namespace Kernel {

AK_TYPEDEF_DISTINCT_ORDERED_ID(u64, TimerId);

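// A Timer couples an expiration time on a specific POSIX clock with a callback
// to invoke when it fires. Timers are reference-counted and are linked into a
// TimerQueue through the intrusive list node below.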
class Timer final : public AtomicRefCounted<Timer> {
    friend class TimerQueue;

public:
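    // Configures a timer that is not currently on a queue; the expiration time
    // is interpreted against the given clock.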
    void setup(clockid_t clock_id, Time expires, Function<void()>&& callback)
    {
        VERIFY(!is_queued());
        m_clock_id = clock_id;
        m_expires = expires;
        m_callback = move(callback);
    }

    ~Timer()
    {
        VERIFY(!is_queued());
    }

    Time remaining() const;

private:
    TimerId m_id;
    clockid_t m_clock_id;
    Time m_expires;
    Time m_remaining {};
    Function<void()> m_callback;
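    // These flags coordinate cancellation with a callback that may already be
    // running on another processor; the exact protocol lives in TimerQueue.cpp.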
    Atomic<bool> m_cancelled { false };
    Atomic<bool> m_callback_finished { false };
    Atomic<bool> m_in_use { false };

    bool operator<(Timer const& rhs) const
    {
        return m_expires < rhs.m_expires;
    }
    bool operator>(Timer const& rhs) const
    {
        return m_expires > rhs.m_expires;
    }
    bool operator==(Timer const& rhs) const
    {
        return m_id == rhs.m_id;
    }

    void clear_cancelled() { m_cancelled.store(false, AK::memory_order_release); }
    bool set_cancelled() { return m_cancelled.exchange(true, AK::memory_order_acq_rel); }

    bool is_in_use() { return m_in_use.load(AK::memory_order_acquire); }
    void set_in_use() { m_in_use.store(true, AK::memory_order_release); }
    void clear_in_use() { m_in_use.store(false, AK::memory_order_release); }

    bool is_callback_finished() const { return m_callback_finished.load(AK::memory_order_acquire); }
    void clear_callback_finished() { m_callback_finished.store(false, AK::memory_order_release); }
    void set_callback_finished() { m_callback_finished.store(true, AK::memory_order_release); }

    Time now(bool) const;

    bool is_queued() const { return m_list_node.is_in_list(); }

public:
    IntrusiveListNode<Timer> m_list_node;
    using List = IntrusiveList<&Timer::m_list_node>;
};

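// The TimerQueue is a singleton that keeps the pending timers for each
// supported clock family (monotonic and realtime), remembers the earliest
// expiry per queue, and fires expired timers from fire().
//
// Illustrative usage (a sketch only; Timer construction and error handling
// are elided, and `expiry` stands for a caller-computed Time):
//
//     timer->setup(CLOCK_MONOTONIC, expiry, [] {
//         // work to run when the timer fires
//     });
//     TimerQueue::the().add_timer(move(timer));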
class TimerQueue {
    friend class Timer;

public:
    TimerQueue();
    static TimerQueue& the();

    TimerId add_timer(NonnullRefPtr<Timer>&&);
    bool add_timer_without_id(NonnullRefPtr<Timer>, clockid_t, Time const&, Function<void()>&&);
    bool cancel_timer(Timer& timer, bool* was_in_use = nullptr);
    void fire();

private:
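    // Each clock family gets its own Queue: the list of pending timers plus
    // the due time of the earliest one.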
    struct Queue {
        Timer::List list;
        Time next_timer_due {};
    };
    void remove_timer_locked(Queue&, Timer&);
    void update_next_timer_due(Queue&);
    void add_timer_locked(NonnullRefPtr<Timer>);

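    // Maps a timer's clock id to the queue that owns it; a timer with an
    // unsupported clock id is a kernel bug.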
    Queue& queue_for_timer(Timer& timer)
    {
        switch (timer.m_clock_id) {
        case CLOCK_MONOTONIC:
        case CLOCK_MONOTONIC_COARSE:
        case CLOCK_MONOTONIC_RAW:
            return m_timer_queue_monotonic;
        case CLOCK_REALTIME:
        case CLOCK_REALTIME_COARSE:
            return m_timer_queue_realtime;
        default:
            VERIFY_NOT_REACHED();
        }
    }

    u64 m_timer_id_count { 0 };
    u64 m_ticks_per_second { 0 };
    Queue m_timer_queue_monotonic;
    Queue m_timer_queue_realtime;
    Timer::List m_timers_executing;
};

}