author     Andreas Kling <kling@serenityos.org>    2021-08-29 12:48:43 +0200
committer  Andreas Kling <kling@serenityos.org>    2021-08-29 12:53:11 +0200
commit     0b4671add70850387d2a2b45f6e66568b44ef75b (patch)
tree       0e4031b543e4d0b077b3910a4b3292c1e64cbe98
parent     d9da513959a13663b0a743103c7fc35a4322de12 (diff)
Kernel: {Mutex,Spinlock}::own_lock() => is_locked_by_current_thread()
Rename these APIs to make it clearer what they are checking.
-rw-r--r--  Kernel/Arch/x86/common/Interrupts.cpp  |  2
-rw-r--r--  Kernel/Interrupts/APIC.cpp             |  2
-rw-r--r--  Kernel/Locking/Mutex.h                 |  2
-rw-r--r--  Kernel/Locking/Spinlock.h              |  2
-rw-r--r--  Kernel/Memory/MemoryManager.cpp        | 24
-rw-r--r--  Kernel/Memory/Region.cpp               |  8
-rw-r--r--  Kernel/Process.h                       |  4
-rw-r--r--  Kernel/Scheduler.cpp                   | 10
-rw-r--r--  Kernel/Syscall.cpp                     |  2
-rw-r--r--  Kernel/Syscalls/execve.cpp             |  2
-rw-r--r--  Kernel/Thread.cpp                      | 34
-rw-r--r--  Kernel/Thread.h                        | 10
-rw-r--r--  Kernel/ThreadBlockers.cpp              |  2
13 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index 2ff10149ab..6fdbf51657 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -463,7 +463,7 @@ extern "C" void handle_interrupt(TrapFrame*) __attribute__((used));
extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
{
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
// Because init_finished() will wait on the other APs, we need
// to release the scheduler lock so that the other APs can also get
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index f941028ec8..88bd33b423 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -459,7 +459,7 @@ UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
VERIFY(cpu < m_processor_enabled_cnt);
// Since we're waiting on other APs here, we shouldn't have the
// scheduler lock
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
// Notify the BSP that we are done initializing. It will unmap the startup data at P8000
m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
diff --git a/Kernel/Locking/Mutex.h b/Kernel/Locking/Mutex.h
index f64153f63f..068daa4635 100644
--- a/Kernel/Locking/Mutex.h
+++ b/Kernel/Locking/Mutex.h
@@ -42,7 +42,7 @@ public:
SpinlockLocker lock(m_lock);
return m_mode != Mode::Unlocked;
}
- [[nodiscard]] bool own_lock() const
+ [[nodiscard]] bool is_locked_by_current_thread() const
{
SpinlockLocker lock(m_lock);
if (m_mode == Mode::Exclusive)
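The Mutex hunk is cut off by the page, but the shape of the check is visible: take the mutex's internal spinlock, then decide based on the lock mode whether the recorded holder matches the current thread. Below is a self-contained user-space analogue of the exclusive case only (a sketch, not the SerenityOS Mutex): the lock records who acquired it, so is_locked_by_current_thread() is a comparison against the caller.

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    // Owner-tracking mutex sketch: lock() records the holder's thread id,
    // is_locked_by_current_thread() compares it against the calling thread.
    class OwnerTrackingMutex {
    public:
        void lock()
        {
            m_mutex.lock();
            m_holder.store(std::this_thread::get_id(), std::memory_order_relaxed);
        }

        void unlock()
        {
            m_holder.store(std::thread::id {}, std::memory_order_relaxed);
            m_mutex.unlock();
        }

        [[nodiscard]] bool is_locked_by_current_thread() const
        {
            return m_holder.load(std::memory_order_relaxed) == std::this_thread::get_id();
        }

    private:
        std::mutex m_mutex;
        std::atomic<std::thread::id> m_holder { std::thread::id {} };
    };

    int main()
    {
        OwnerTrackingMutex mutex;
        assert(!mutex.is_locked_by_current_thread());
        mutex.lock();
        assert(mutex.is_locked_by_current_thread());
        mutex.unlock();
        assert(!mutex.is_locked_by_current_thread());
    }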
diff --git a/Kernel/Locking/Spinlock.h b/Kernel/Locking/Spinlock.h
index f2020ef3a7..a3abb4d12e 100644
--- a/Kernel/Locking/Spinlock.h
+++ b/Kernel/Locking/Spinlock.h
@@ -100,7 +100,7 @@ public:
return m_lock.load(AK::memory_order_relaxed) != 0;
}
- [[nodiscard]] ALWAYS_INLINE bool own_lock() const
+ [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_thread() const
{
return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
}
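In the spinlock hunk the lock word itself doubles as the owner record: when held, it stores FlatPtr(&Processor::current()), so the ownership check is a single relaxed load compared against the current Processor. The following self-contained analogue encodes the holder in the lock word the same way, using a hash of the thread id in place of the Processor pointer (a sketch under that substitution, not the kernel code):

    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <thread>

    // Spinlock sketch whose lock word is also the owner record: 0 means
    // "unlocked", any other value identifies the holder.
    class OwnerEncodingSpinlock {
    public:
        void lock()
        {
            std::size_t const me = self_token();
            std::size_t expected = 0;
            // Spin until we install our own token as the holder.
            while (!m_word.compare_exchange_weak(expected, me, std::memory_order_acquire))
                expected = 0;
        }

        void unlock()
        {
            m_word.store(0, std::memory_order_release);
        }

        [[nodiscard]] bool is_locked() const
        {
            return m_word.load(std::memory_order_relaxed) != 0;
        }

        [[nodiscard]] bool is_locked_by_current_thread() const
        {
            // One relaxed load and compare, mirroring the kernel check above.
            return m_word.load(std::memory_order_relaxed) == self_token();
        }

    private:
        static std::size_t self_token()
        {
            // Assumes the hash collides with neither 0 nor another live
            // thread's token; good enough for a sketch.
            return std::hash<std::thread::id> {}(std::this_thread::get_id());
        }

        std::atomic<std::size_t> m_word { 0 };
    };

    int main()
    {
        OwnerEncodingSpinlock lock;
        lock.lock();
        assert(lock.is_locked() && lock.is_locked_by_current_thread());
        lock.unlock();
        assert(!lock.is_locked_by_current_thread());
    }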
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 9e5ad39175..3c9ed6848b 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -508,8 +508,8 @@ PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical
PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
- VERIFY(page_directory.get_lock().own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
+ VERIFY(page_directory.get_lock().is_locked_by_current_thread());
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -525,8 +525,8 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress
PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
- VERIFY(page_directory.get_lock().own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
+ VERIFY(page_directory.get_lock().is_locked_by_current_thread());
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -567,8 +567,8 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
- VERIFY(page_directory.get_lock().own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
+ VERIFY(page_directory.get_lock().is_locked_by_current_thread());
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -622,7 +622,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
{
- VERIFY(space.get_lock().own_lock());
+ VERIFY(space.get_lock().is_locked_by_current_thread());
return space.find_region_containing({ vaddr, 1 });
}
@@ -953,7 +953,7 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddres
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
- VERIFY(s_mm_lock.own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
auto& mm_data = get_data();
auto& pte = boot_pd_kernel_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
@@ -979,7 +979,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
- VERIFY(s_mm_lock.own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
auto& mm_data = get_data();
auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
if (pte.physical_page_base() != pt_paddr.get()) {
@@ -1005,7 +1005,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
auto& mm_data = get_data();
mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
@@ -1026,7 +1026,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
void MemoryManager::unquickmap_page()
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
auto& mm_data = get_data();
VERIFY(mm_data.m_quickmap_in_use.is_locked());
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
@@ -1039,7 +1039,7 @@ void MemoryManager::unquickmap_page()
bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
{
- VERIFY(space.get_lock().own_lock());
+ VERIFY(space.get_lock().is_locked_by_current_thread());
if (!is_user_address(vaddr))
return false;
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 7130f1c5ff..e91a8056f5 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -174,7 +174,7 @@ void Region::set_should_cow(size_t page_index, bool cow)
bool Region::map_individual_page_impl(size_t page_index)
{
- VERIFY(m_page_directory->get_lock().own_lock());
+ VERIFY(m_page_directory->get_lock().is_locked_by_current_thread());
auto page_vaddr = vaddr_from_page_index(page_index);
bool user_allowed = page_vaddr.get() >= 0x00800000 && is_user_address(page_vaddr);
@@ -253,7 +253,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
void Region::set_page_directory(PageDirectory& page_directory)
{
VERIFY(!m_page_directory || m_page_directory == &page_directory);
- VERIFY(s_mm_lock.own_lock());
+ VERIFY(s_mm_lock.is_locked_by_current_thread());
m_page_directory = page_directory;
}
@@ -394,8 +394,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(vmobject().is_inode());
- VERIFY(!s_mm_lock.own_lock());
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!s_mm_lock.is_locked_by_current_thread());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
diff --git a/Kernel/Process.h b/Kernel/Process.h
index 96ec8d4ff4..698b1d431f 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -978,10 +978,10 @@ inline ProcessID Thread::pid() const
}
#define VERIFY_PROCESS_BIG_LOCK_ACQUIRED(process) \
- VERIFY(process->big_lock().own_lock());
+ VERIFY(process->big_lock().is_locked_by_current_thread());
#define VERIFY_NO_PROCESS_BIG_LOCK(process) \
- VERIFY(!process->big_lock().own_lock());
+ VERIFY(!process->big_lock().is_locked_by_current_thread());
inline static KResultOr<NonnullOwnPtr<KString>> try_copy_kstring_from_user(const Kernel::Syscall::StringArgument& string)
{
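These two macros let a function state its big-lock contract in one line at the top of its body. A self-contained sketch of that pattern follows; all names (ToyProcess, the assert-based macros, the example function) are invented for illustration and stand in for the kernel's Process, VERIFY, and syscall machinery.

    #include <cassert>
    #include <thread>

    // Toy stand-ins for the kernel types, just enough to show the macro pattern.
    struct ToyBigLock {
        std::thread::id holder {};
        bool is_locked_by_current_thread() const { return holder == std::this_thread::get_id(); }
    };

    struct ToyProcess {
        ToyBigLock m_big_lock;
        ToyBigLock& big_lock() { return m_big_lock; }
    };

    // Analogues of VERIFY_PROCESS_BIG_LOCK_ACQUIRED / VERIFY_NO_PROCESS_BIG_LOCK.
    #define VERIFY_BIG_LOCK_ACQUIRED(process) assert((process)->big_lock().is_locked_by_current_thread())
    #define VERIFY_NO_BIG_LOCK(process) assert(!(process)->big_lock().is_locked_by_current_thread())

    void needs_big_lock(ToyProcess* process)
    {
        // The locking contract is asserted, and documented, up front.
        VERIFY_BIG_LOCK_ACQUIRED(process);
        // ... body may assume the big lock is held by this thread ...
    }

    int main()
    {
        ToyProcess process;
        VERIFY_NO_BIG_LOCK(&process);
        process.big_lock().holder = std::this_thread::get_id(); // pretend we acquired it
        needs_big_lock(&process);
    }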
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index e70def131b..f6f3c74fe2 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -163,7 +163,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
void Scheduler::enqueue_runnable_thread(Thread& thread)
{
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
if (thread.is_idle_thread())
return;
auto priority = thread_priority_to_priority_index(thread.priority());
@@ -266,7 +266,7 @@ bool Scheduler::yield()
bool Scheduler::context_switch(Thread* thread)
{
- if (Memory::s_mm_lock.own_lock()) {
+ if (Memory::s_mm_lock.is_locked_by_current_thread()) {
PANIC("In context switch while holding Memory::s_mm_lock");
}
@@ -320,7 +320,7 @@ bool Scheduler::context_switch(Thread* thread)
void Scheduler::enter_current(Thread& prev_thread, bool is_first)
{
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
// We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
auto scheduler_time = Scheduler::current_time();
@@ -362,7 +362,7 @@ void Scheduler::prepare_after_exec()
{
// This is called after exec() when doing a context "switch" into
// the new process. This is called from Processor::assume_context
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
VERIFY(!Processor::current_in_scheduler());
Processor::set_current_in_scheduler(true);
@@ -372,7 +372,7 @@ void Scheduler::prepare_for_idle_loop()
{
// This is called when the CPU finished setting up the idle loop
// and is about to run it. We need to acquire the scheduler lock
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
g_scheduler_lock.lock();
VERIFY(!Processor::current_in_scheduler());
diff --git a/Kernel/Syscall.cpp b/Kernel/Syscall.cpp
index 1b89ff35dd..c9b267425c 100644
--- a/Kernel/Syscall.cpp
+++ b/Kernel/Syscall.cpp
@@ -236,7 +236,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
// Check if we're supposed to return to userspace or just die.
current_thread->die_if_needed();
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
}
}
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 5c3d0e33de..0ac2b90fc3 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -922,7 +922,7 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
// We need to enter the scheduler lock before changing the state
// and it will be released after the context switch into that
// thread. We should also still be in our critical section
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
VERIFY(Processor::in_critical() == 1);
g_scheduler_lock.lock();
current_thread->set_state(Thread::State::Running);
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index db39778107..747f844038 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -160,7 +160,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
VERIFY(!Processor::current_in_irq());
VERIFY(this == Thread::current());
ScopedCritical critical;
- VERIFY(!Memory::s_mm_lock.own_lock());
+ VERIFY(!Memory::s_mm_lock.is_locked_by_current_thread());
SpinlockLocker block_lock(m_block_lock);
@@ -198,9 +198,9 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
for (;;) {
// Yield to the scheduler, and wait for us to resume unblocked.
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
VERIFY(Processor::in_critical());
- if (&lock != &big_lock && big_lock.own_lock()) {
+ if (&lock != &big_lock && big_lock.is_locked_by_current_thread()) {
// We're locking another lock and already hold the big lock...
// We need to release the big lock
yield_and_release_relock_big_lock();
@@ -239,8 +239,8 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
SpinlockLocker block_lock(m_block_lock);
VERIFY(m_blocking_lock == &lock);
VERIFY(!Processor::current_in_irq());
- VERIFY(g_scheduler_lock.own_lock());
- VERIFY(m_block_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(m_block_lock.is_locked_by_current_thread());
VERIFY(m_blocking_lock == &lock);
dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
m_blocking_lock = nullptr;
@@ -285,8 +285,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
void Thread::unblock(u8 signal)
{
VERIFY(!Processor::current_in_irq());
- VERIFY(g_scheduler_lock.own_lock());
- VERIFY(m_block_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(m_block_lock.is_locked_by_current_thread());
if (m_state != Thread::Blocked)
return;
if (m_blocking_lock)
@@ -402,8 +402,8 @@ void Thread::exit(void* exit_value)
void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
{
- VERIFY(!g_scheduler_lock.own_lock());
- VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().is_locked_by_current_thread());
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable;
Scheduler::yield(); // flag a switch
@@ -414,7 +414,7 @@ void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_
void Thread::yield_and_release_relock_big_lock()
{
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable;
Scheduler::yield(); // flag a switch
@@ -495,7 +495,7 @@ void Thread::finalize()
VERIFY(Thread::current() != this);
#if LOCK_DEBUG
- VERIFY(!m_lock.own_lock());
+ VERIFY(!m_lock.is_locked_by_current_thread());
if (lock_count() > 0) {
dbgln("Thread {} leaking {} Locks!", *this, lock_count());
SpinlockLocker list_lock(m_holding_locks_lock);
@@ -612,7 +612,7 @@ u32 Thread::pending_signals() const
u32 Thread::pending_signals_for_state() const
{
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
if (is_handling_page_fault())
return 0;
@@ -709,7 +709,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)
DispatchSignalResult Thread::dispatch_one_pending_signal()
{
- VERIFY(m_lock.own_lock());
+ VERIFY(m_lock.is_locked_by_current_thread());
u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
if (signal_candidates == 0)
return DispatchSignalResult::Continue;
@@ -816,7 +816,7 @@ void Thread::resume_from_stopped()
{
VERIFY(is_stopped());
VERIFY(m_stop_state != State::Invalid);
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
if (m_stop_state == Blocked) {
SpinlockLocker block_lock(m_block_lock);
if (m_blocker || m_blocking_lock) {
@@ -834,7 +834,7 @@ void Thread::resume_from_stopped()
DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
VERIFY_INTERRUPTS_DISABLED();
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
VERIFY(signal > 0 && signal <= 32);
VERIFY(process().is_user_process());
VERIFY(this == Thread::current());
@@ -1047,7 +1047,7 @@ RefPtr<Thread> Thread::clone(Process& process)
void Thread::set_state(State new_state, u8 stop_signal)
{
State previous_state;
- VERIFY(g_scheduler_lock.own_lock());
+ VERIFY(g_scheduler_lock.is_locked_by_current_thread());
if (new_state == m_state)
return;
@@ -1162,7 +1162,7 @@ String Thread::backtrace()
auto& process = const_cast<Process&>(this->process());
auto stack_trace = Processor::capture_stack_trace(*this);
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
ProcessPagingScope paging_scope(process);
for (auto& frame : stack_trace) {
if (Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index dd815418cd..01a3bee63d 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -193,7 +193,7 @@ public:
StringView name() const
{
// NOTE: Whoever is calling this needs to be holding our lock while reading the name.
- VERIFY(m_lock.own_lock());
+ VERIFY(m_lock.is_locked_by_current_thread());
return m_name ? m_name->view() : StringView {};
}
@@ -839,7 +839,7 @@ public:
VERIFY(!Processor::current_in_irq());
VERIFY(this == Thread::current());
ScopedCritical critical;
- VERIFY(!Memory::s_mm_lock.own_lock());
+ VERIFY(!Memory::s_mm_lock.is_locked_by_current_thread());
SpinlockLocker block_lock(m_block_lock);
// We need to hold m_block_lock so that nobody can unblock a blocker as soon
@@ -878,8 +878,8 @@ public:
// threads to die. In that case
timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
VERIFY(!Processor::current_in_irq());
- VERIFY(!g_scheduler_lock.own_lock());
- VERIFY(!m_block_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+ VERIFY(!m_block_lock.is_locked_by_current_thread());
// NOTE: this may execute on the same or any other processor!
SpinlockLocker scheduler_lock(g_scheduler_lock);
SpinlockLocker block_lock(m_block_lock);
@@ -907,7 +907,7 @@ public:
auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
for (;;) {
// Yield to the scheduler, and wait for us to resume unblocked.
- VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
VERIFY(Processor::in_critical());
yield_without_releasing_big_lock();
VERIFY(Processor::in_critical());
diff --git a/Kernel/ThreadBlockers.cpp b/Kernel/ThreadBlockers.cpp
index 178f07dbf9..246e13077d 100644
--- a/Kernel/ThreadBlockers.cpp
+++ b/Kernel/ThreadBlockers.cpp
@@ -162,7 +162,7 @@ Thread::FutexBlocker::~FutexBlocker()
void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
{
- VERIFY(m_lock.own_lock());
+ VERIFY(m_lock.is_locked_by_current_thread());
set_blocker_set_raw_locked(&futex_queue);
// We can now release the lock
m_lock.unlock(m_relock_flags);