author    Tom <tomut@yahoo.com>    2021-07-16 15:48:22 -0600
committer Andreas Kling <kling@serenityos.org>    2021-07-18 13:08:51 +0200
commit    ae8472f9ca1b9f79497f1bc511aac4818ef3d79b (patch)
tree      aad9957bf4032017ba977fe282f4bb64c1a78b2a /Kernel/Thread.cpp
parent    06ddfcde89fc649968bf3867aff628e5dcb8dba7 (diff)
Kernel: Fix blocking relock of the big_lock while unlocking other Lock
When a Thread is being unblocked and we need to re-lock the process big_lock, and that re-locking blocks again, we may end up in Thread::block again while still servicing the original lock's Thread::block. So permit recursion, as long as it's only the big_lock that we block on again.

Fixes #8822
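To see the shape of the fix, here is a minimal, hypothetical standalone sketch of the save/restore pattern the commit introduces. Thread, Mutex, and the recursion trigger below are simplified stand-ins for illustration, not the real kernel code; only the assert condition and the previous_blocking_lock handling mirror the patch.

// Minimal model of the re-entrant block() pattern; all names here are
// simplified stand-ins for the SerenityOS kernel types.
#include <cassert>
#include <cstdio>

struct Mutex {
    const char* name;
};

struct Thread {
    Mutex* m_blocking_lock { nullptr };
    Mutex& m_big_lock;

    explicit Thread(Mutex& big_lock)
        : m_big_lock(big_lock)
    {
    }

    void block(Mutex& lock)
    {
        // Recursion is only tolerated when the nested block is on the
        // big lock; any other nesting is still a bug.
        assert((&lock == &m_big_lock && m_blocking_lock != &m_big_lock) || !m_blocking_lock);

        // Remember what the outer invocation (if any) was blocking on.
        auto* previous_blocking_lock = m_blocking_lock;
        m_blocking_lock = &lock;

        printf("blocking on %s\n", lock.name);

        // While "blocked" on an ordinary lock, the thread may have to
        // re-take the big lock, which can block again and re-enter
        // block() with m_blocking_lock still pointing at the outer lock.
        if (&lock != &m_big_lock)
            block(m_big_lock);

        // In the kernel, the unblock path clears m_blocking_lock once the
        // lock is granted; the patch then restores the outer lock so the
        // original invocation still sees consistent state when it resumes.
        m_blocking_lock = previous_blocking_lock;
    }
};

int main()
{
    Mutex big_lock { "big_lock" };
    Mutex other { "other_lock" };
    Thread thread(big_lock);
    thread.block(other); // blocks on other_lock, then recursively on big_lock
}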
Diffstat (limited to 'Kernel/Thread.cpp')
-rw-r--r--    Kernel/Thread.cpp    15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index b4ffcd8da1..183fb82e4a 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -180,8 +180,6 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
     VERIFY(!s_mm_lock.own_lock());
 
     ScopedSpinLock block_lock(m_block_lock);
-    VERIFY(!m_in_block);
-    m_in_block = true;
 
     ScopedSpinLock scheduler_lock(g_scheduler_lock);
 
@@ -195,7 +193,14 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
     default:
         VERIFY_NOT_REACHED();
     }
-    VERIFY(!m_blocking_lock);
+
+    // If we're blocking on the big-lock we may actually be in the process
+    // of unblocking from another lock. If that's the case m_blocking_lock
+    // is already set
+    auto& big_lock = process().big_lock();
+    VERIFY((&lock == &big_lock && m_blocking_lock != &big_lock) || !m_blocking_lock);
+
+    auto previous_blocking_lock = m_blocking_lock;
     m_blocking_lock = &lock;
     m_lock_requested_count = lock_count;
 
@@ -208,7 +213,6 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
 
     dbgln_if(THREAD_DEBUG, "Thread {} blocking on Mutex {}", *this, &lock);
 
-    auto& big_lock = process().big_lock();
     for (;;) {
         // Yield to the scheduler, and wait for us to resume unblocked.
         VERIFY(!g_scheduler_lock.own_lock());
@@ -230,8 +234,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
         }
 
         VERIFY(!m_blocking_lock);
-        VERIFY(m_in_block);
-        m_in_block = false;
+        m_blocking_lock = previous_blocking_lock;
         break;
     }
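The heart of the change is the relaxed VERIFY. As a quick sanity check, here is a hypothetical standalone enumeration of the condition's cases, with plain ints standing in for the kernel's Mutex objects and nullptr for "not currently blocking on anything":

// Hypothetical truth-table check of the relaxed VERIFY condition.
#include <cassert>

int main()
{
    int big_lock = 0;
    int other_lock = 0;
    auto permitted = [&](int* lock, int* blocking_lock) {
        return (lock == &big_lock && blocking_lock != &big_lock) || !blocking_lock;
    };
    assert(permitted(&other_lock, nullptr));      // ordinary, non-nested block
    assert(permitted(&big_lock, nullptr));        // plain block on the big lock
    assert(permitted(&big_lock, &other_lock));    // the newly permitted recursion
    assert(!permitted(&big_lock, &big_lock));     // big lock must not nest on itself
    assert(!permitted(&other_lock, &other_lock)); // nesting any other lock is still a bug
    assert(!permitted(&other_lock, &big_lock));   // even when the big lock is the outer one
}

In other words, the only nesting the kernel now accepts is an inner block on the big lock while some other lock is pending, which is exactly the unblock-and-relock scenario described in the commit message.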