Diffstat (limited to 'Kernel')
-rw-r--r--   Kernel/Syscalls/sched.cpp    2
-rw-r--r--   Kernel/Thread.cpp           11
-rw-r--r--   Kernel/Thread.h             12
3 files changed, 17 insertions, 8 deletions
diff --git a/Kernel/Syscalls/sched.cpp b/Kernel/Syscalls/sched.cpp
index 5c1aa67b79..63c16408f6 100644
--- a/Kernel/Syscalls/sched.cpp
+++ b/Kernel/Syscalls/sched.cpp
@@ -12,7 +12,7 @@ KResultOr<FlatPtr> Process::sys$yield()
 {
     VERIFY_NO_PROCESS_BIG_LOCK(this);
     REQUIRE_PROMISE(stdio);
-    Thread::current()->yield_assuming_not_holding_big_lock();
+    Thread::current()->yield_without_releasing_big_lock();
     return 0;
 }
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 4a72194090..b580e6445e 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -220,7 +220,10 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
             // We need to release the big lock
             yield_and_release_relock_big_lock();
         } else {
-            yield_assuming_not_holding_big_lock();
+            // By the time we've reached this point, another thread might
+            // have marked us as holding the big lock, so this call must
+            // not verify that we're not holding it.
+            yield_without_releasing_big_lock(VerifyLockNotHeld::No);
         }
         VERIFY(Processor::in_critical());
@@ -412,10 +415,10 @@ void Thread::exit(void* exit_value)
     die_if_needed();
 }
 
-void Thread::yield_assuming_not_holding_big_lock()
+void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
 {
     VERIFY(!g_scheduler_lock.own_lock());
-    VERIFY(!process().big_lock().own_lock());
+    VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().own_lock());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
@@ -613,7 +616,7 @@ void Thread::check_dispatch_pending_signal()
     switch (result) {
     case DispatchSignalResult::Yield:
-        yield_assuming_not_holding_big_lock();
+        yield_without_releasing_big_lock();
         break;
     default:
         break;
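
The Thread.cpp hunks above reduce to one pattern: a single yield path whose big-lock assertion can be skipped by the one caller that may already be recorded as the holder. The following is a minimal standalone sketch of that pattern, with hypothetical names and a plain assert standing in for the kernel's VERIFY and big lock; it is not the actual SerenityOS implementation.

#include <cassert>
#include <cstdio>

// Hypothetical stand-in for the process big lock; only tracks ownership.
struct BigLock {
    bool held_by_current_thread { false };
    bool own_lock() const { return held_by_current_thread; }
};

enum class VerifyLockNotHeld {
    Yes,
    No
};

// Mirrors the shape of yield_without_releasing_big_lock(): by default the
// caller asserts it does not hold the big lock, but a caller that may already
// be marked as the holder passes VerifyLockNotHeld::No to skip the check.
void yield_without_releasing_big_lock(BigLock& big_lock,
    VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes)
{
    assert(verify_lock_not_held == VerifyLockNotHeld::No || !big_lock.own_lock());
    std::puts("yielding; big lock is left untouched");
    // ... a real kernel would flag a scheduler switch here ...
}

int main()
{
    BigLock big_lock;

    // Normal call site (e.g. sys$yield): the lock is not held, so the
    // default verification runs and passes.
    yield_without_releasing_big_lock(big_lock);

    // Call site like the one in Thread::block(): another thread may already
    // have marked us as the holder, so verification is explicitly skipped.
    big_lock.held_by_current_thread = true;
    yield_without_releasing_big_lock(big_lock, VerifyLockNotHeld::No);
    return 0;
}
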
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index aa7ced83a9..b7431d0b40 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -836,7 +836,7 @@ public:
         while (state() == Thread::Stopped) {
             lock.unlock();
             // We shouldn't be holding the big lock here
-            yield_assuming_not_holding_big_lock();
+            yield_without_releasing_big_lock();
             lock.lock();
         }
     }
@@ -922,7 +922,7 @@ public:
             // Yield to the scheduler, and wait for us to resume unblocked.
             VERIFY(!g_scheduler_lock.own_lock());
             VERIFY(Processor::in_critical());
-            yield_assuming_not_holding_big_lock();
+            yield_without_releasing_big_lock();
             VERIFY(Processor::in_critical());
 
             ScopedSpinLock block_lock2(m_block_lock);
@@ -1371,7 +1371,13 @@ private:
     bool m_is_profiling_suppressed { false };
 
     void yield_and_release_relock_big_lock();
-    void yield_assuming_not_holding_big_lock();
+
+    enum class VerifyLockNotHeld {
+        Yes,
+        No
+    };
+
+    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
     void drop_thread_count(bool);
 
 public:
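
Because the VerifyLockNotHeld parameter declared above defaults to Yes, the other call sites touched by this diff (sys$yield(), check_dispatch_pending_signal(), and the two wait loops in Thread.h) keep the strict "not holding the big lock" assertion without passing anything extra; only the call inside Thread::block() opts out explicitly with VerifyLockNotHeld::No.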