/*
* Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#include <Kernel/Arch/Spinlock.h>

namespace Kernel {
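
// Acquire the spinlock: save the caller's EFLAGS, enter a critical section,
// disable interrupts, then spin until the lock word flips from 0 to 1.
// Returns the saved flags so unlock() can restore the interrupt state.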
u32 Spinlock::lock()
{
    u32 prev_flags = cpu_flags();
    Processor::enter_critical();
    cli();
    while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
        Processor::wait_check();
    track_lock_acquire(m_rank);
    return prev_flags;
}
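
// Release the spinlock, then restore the interrupt state captured by lock().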
void Spinlock::unlock(u32 prev_flags)
{
    VERIFY(is_locked());
    track_lock_release(m_rank);
    m_lock.store(0, AK::memory_order_release);

    Processor::leave_critical();

    // 0x200 is the IF (interrupt enable) bit in x86 EFLAGS: re-enable
    // interrupts only if they were enabled when the lock was taken.
    if ((prev_flags & 0x200) != 0)
        sti();
    else
        cli();
}
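
// Acquire the recursive spinlock. The lock word stores the owning
// Processor's address rather than a 0/1 flag, so the same CPU may
// re-acquire the lock; m_recursions counts the nesting depth.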
u32 RecursiveSpinlock::lock()
{
    u32 prev_flags = cpu_flags();
    cli();
    Processor::enter_critical();
    auto& proc = Processor::current();
    FlatPtr cpu = FlatPtr(&proc);
    FlatPtr expected = 0;
    while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
        // If the lock word already holds our own Processor's address,
        // this is a nested acquisition by the current CPU; stop spinning.
        if (expected == cpu)
            break;
        Processor::wait_check();
        expected = 0;
    }
    if (m_recursions == 0)
        track_lock_acquire(m_rank);
    m_recursions++;
    return prev_flags;
}
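
// Release one level of the recursive spinlock; the lock word is only
// cleared when the outermost acquisition is released.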
void RecursiveSpinlock::unlock(u32 prev_flags)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(m_recursions > 0);
    VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
    if (--m_recursions == 0) {
        track_lock_release(m_rank);
        m_lock.store(0, AK::memory_order_release);
    }

    Processor::leave_critical();

    // As in Spinlock::unlock(), restore the interrupt state captured by lock().
    if ((prev_flags & 0x200) != 0)
        sti();
    else
        cli();
}

}
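
// Usage sketch (illustrative only, not part of this file): callers normally
// go through the RAII SpinlockLocker guard rather than pairing lock() and
// unlock() by hand; the guard restores the saved flags on scope exit:
//
//     Spinlock m_lock {};                  // assumed default-constructible here
//     {
//         SpinlockLocker locker(m_lock);   // calls lock(), keeps the saved flags
//         // ... critical section runs with interrupts disabled ...
//     } // destructor calls unlock(), restoring the previous interrupt state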