path: root/Kernel/Thread.cpp
author    Andreas Kling <kling@serenityos.org>    2021-08-29 20:10:24 +0200
committer Andreas Kling <kling@serenityos.org>    2021-08-29 22:19:42 +0200
commit    68bf6db673e3a0070b5c47d7ab997d82d31e1526 (patch)
tree      e46af9f9ae539a76be6d50d13192f89071dacabd /Kernel/Thread.cpp
parent    ed0e64943f7b779d2343471c76c997e4a6d3a438 (diff)
download  serenity-68bf6db673e3a0070b5c47d7ab997d82d31e1526.zip
Kernel: Rename Spinlock::is_locked_by_current_thread()
...to is_locked_by_current_processor(). As Tom pointed out, this is much more accurate. :^)
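
The rename captures an important property of spinlocks: they are acquired inside a critical section with preemption disabled, so for the entire time a spinlock is held, the holder is pinned to one CPU. The meaningful notion of ownership is therefore the processor, not the thread. Below is a minimal sketch of that idea; it is not SerenityOS's actual Spinlock, and Processor / current_processor() are hypothetical stand-ins for the kernel's per-CPU machinery.

#include <atomic>

// Stand-in for the kernel's per-CPU structure. A real kernel would read the
// current processor from a per-CPU register, with preemption already disabled.
struct Processor { };

Processor* current_processor()
{
    static thread_local Processor proc; // hypothetical stub for illustration
    return &proc;
}

class RecursiveSpinlock {
public:
    void lock()
    {
        auto* me = current_processor();
        for (;;) {
            Processor* expected = nullptr;
            if (m_owner.compare_exchange_weak(expected, me, std::memory_order_acquire))
                break; // we claimed the lock
            if (expected == me)
                break; // recursive acquisition on the same CPU
            // Another CPU holds the lock; spin and retry.
        }
        ++m_recursion;
    }

    void unlock()
    {
        if (--m_recursion == 0)
            m_owner.store(nullptr, std::memory_order_release);
    }

    // The renamed predicate: ownership is tracked per processor, because the
    // holder runs with preemption off and cannot migrate to another thread.
    bool is_locked_by_current_processor() const
    {
        return m_owner.load(std::memory_order_relaxed) == current_processor();
    }

private:
    std::atomic<Processor*> m_owner { nullptr };
    unsigned m_recursion { 0 };
};

With that framing, an assertion like VERIFY(g_scheduler_lock.is_locked_by_current_processor()) reads as "this CPU holds the scheduler lock", which is exactly the invariant the call sites in the diff below rely on.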
Diffstat (limited to 'Kernel/Thread.cpp')
-rw-r--r-- Kernel/Thread.cpp | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 747f844038..22b93e5a39 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -160,7 +160,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
VERIFY(!Processor::current_in_irq());
VERIFY(this == Thread::current());
ScopedCritical critical;
- VERIFY(!Memory::s_mm_lock.is_locked_by_current_thread());
+ VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
SpinlockLocker block_lock(m_block_lock);
@@ -198,7 +198,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
for (;;) {
// Yield to the scheduler, and wait for us to resume unblocked.
- VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
VERIFY(Processor::in_critical());
if (&lock != &big_lock && big_lock.is_locked_by_current_thread()) {
// We're locking another lock and already hold the big lock...
@@ -239,8 +239,8 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
SpinlockLocker block_lock(m_block_lock);
VERIFY(m_blocking_lock == &lock);
VERIFY(!Processor::current_in_irq());
- VERIFY(g_scheduler_lock.is_locked_by_current_thread());
- VERIFY(m_block_lock.is_locked_by_current_thread());
+ VERIFY(g_scheduler_lock.is_locked_by_current_processor());
+ VERIFY(m_block_lock.is_locked_by_current_processor());
VERIFY(m_blocking_lock == &lock);
dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
m_blocking_lock = nullptr;
@@ -285,8 +285,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
void Thread::unblock(u8 signal)
{
VERIFY(!Processor::current_in_irq());
- VERIFY(g_scheduler_lock.is_locked_by_current_thread());
- VERIFY(m_block_lock.is_locked_by_current_thread());
+ VERIFY(g_scheduler_lock.is_locked_by_current_processor());
+ VERIFY(m_block_lock.is_locked_by_current_processor());
if (m_state != Thread::Blocked)
return;
if (m_blocking_lock)
@@ -402,7 +402,7 @@ void Thread::exit(void* exit_value)
void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
{
- VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().is_locked_by_current_thread());
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable;
@@ -414,7 +414,7 @@ void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_
void Thread::yield_and_release_relock_big_lock()
{
- VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable;
Scheduler::yield(); // flag a switch
@@ -612,7 +612,7 @@ u32 Thread::pending_signals() const
u32 Thread::pending_signals_for_state() const
{
- VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(g_scheduler_lock.is_locked_by_current_processor());
constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
if (is_handling_page_fault())
return 0;
@@ -709,7 +709,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)
DispatchSignalResult Thread::dispatch_one_pending_signal()
{
- VERIFY(m_lock.is_locked_by_current_thread());
+ VERIFY(m_lock.is_locked_by_current_processor());
u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
if (signal_candidates == 0)
return DispatchSignalResult::Continue;
@@ -816,7 +816,7 @@ void Thread::resume_from_stopped()
{
VERIFY(is_stopped());
VERIFY(m_stop_state != State::Invalid);
- VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(g_scheduler_lock.is_locked_by_current_processor());
if (m_stop_state == Blocked) {
SpinlockLocker block_lock(m_block_lock);
if (m_blocker || m_blocking_lock) {
@@ -834,7 +834,7 @@ void Thread::resume_from_stopped()
DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(g_scheduler_lock.is_locked_by_current_processor());
VERIFY(signal > 0 && signal <= 32);
VERIFY(process().is_user_process());
VERIFY(this == Thread::current());
@@ -1047,7 +1047,7 @@ RefPtr<Thread> Thread::clone(Process& process)
void Thread::set_state(State new_state, u8 stop_signal)
{
State previous_state;
- VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(g_scheduler_lock.is_locked_by_current_processor());
if (new_state == m_state)
return;
@@ -1162,7 +1162,7 @@ String Thread::backtrace()
auto& process = const_cast<Process&>(this->process());
auto stack_trace = Processor::capture_stack_trace(*this);
- VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
ProcessPagingScope paging_scope(process);
for (auto& frame : stack_trace) {
if (Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
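
Note that the two big_lock checks above deliberately keep is_locked_by_current_thread(): Process::big_lock() is a Mutex, and a mutex can be held across blocking and rescheduling, so its owner genuinely is a thread. A rough sketch of that contrasting predicate follows; it is hypothetical, not the kernel's actual Mutex, and current_thread() stands in for Thread::current().

struct Thread { };

Thread* current_thread()
{
    static thread_local Thread thread; // hypothetical stub for illustration
    return &thread;
}

class Mutex {
public:
    // A mutex holder may sleep and later resume on a different CPU, so the
    // meaningful ownership question is "which thread?", not "which processor?".
    bool is_locked_by_current_thread() const { return m_holder == current_thread(); }

private:
    Thread* m_holder { nullptr }; // set in lock(), cleared in unlock()
};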