Author:    Jean-Baptiste Boric <jblbeurope@gmail.com> 2021-07-18 09:10:27 +0200
Committer: Andreas Kling <kling@serenityos.org> 2021-08-07 11:48:00 +0200
Commit:    f7f794e74a7fa28a93431e457959eba2bbac8c0c (patch)
Tree:      1e185a8eb69b75da3fdb8815a828c567024049f3 /Kernel/Locking
Parent:    479b07339c271facae062cd0cbc6ccc86a47fc71 (diff)
Kernel: Move Mutex into Locking/
Diffstat (limited to 'Kernel/Locking')
-rw-r--r--  Kernel/Locking/Mutex.cpp  432
-rw-r--r--  Kernel/Locking/Mutex.h    255
2 files changed, 687 insertions, 0 deletions
diff --git a/Kernel/Locking/Mutex.cpp b/Kernel/Locking/Mutex.cpp
new file mode 100644
index 0000000000..9a6266230d
--- /dev/null
+++ b/Kernel/Locking/Mutex.cpp
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Debug.h>
+#if LOCK_DEBUG
+# include <AK/SourceLocation.h>
+#endif
+#include <Kernel/KSyms.h>
+#include <Kernel/Locking/Mutex.h>
+#include <Kernel/SpinLock.h>
+#include <Kernel/Thread.h>
+
+namespace Kernel {
+
+#if LOCK_DEBUG
+void Mutex::lock(Mode mode, const SourceLocation& location)
+#else
+void Mutex::lock(Mode mode)
+#endif
+{
+ // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+ // and also from within critical sections!
+ VERIFY(!Processor::current().in_irq());
+ VERIFY(mode != Mode::Unlocked);
+ auto current_thread = Thread::current();
+
+ ScopedSpinLock lock(m_lock);
+ bool did_block = false;
+ Mode current_mode = m_mode;
+ switch (current_mode) {
+ case Mode::Unlocked: {
+ dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
+ m_mode = mode;
+ VERIFY(!m_holder);
+ VERIFY(m_shared_holders.is_empty());
+ if (mode == Mode::Exclusive) {
+ m_holder = current_thread;
+ } else {
+ VERIFY(mode == Mode::Shared);
+ m_shared_holders.set(current_thread, 1);
+ }
+ VERIFY(m_times_locked == 0);
+ m_times_locked++;
+
+#if LOCK_DEBUG
+ if (current_thread) {
+ current_thread->holding_lock(*this, 1, location);
+ }
+#endif
+ return;
+ }
+ case Mode::Exclusive: {
+ VERIFY(m_holder);
+ if (m_holder != current_thread) {
+ block(*current_thread, mode, lock, 1);
+ did_block = true;
+ // If we blocked then m_mode should have been updated to what we requested
+ VERIFY(m_mode == mode);
+ }
+
+ if (m_mode == Mode::Exclusive) {
+ VERIFY(m_holder == current_thread);
+ VERIFY(m_shared_holders.is_empty());
+ } else if (did_block && mode == Mode::Shared) {
+ // The lock would only have been converted to shared if we blocked while requesting shared access
+ VERIFY(!m_holder);
+ VERIFY(!m_shared_holders.is_empty());
+ VERIFY(m_shared_holders.find(current_thread) != m_shared_holders.end());
+ }
+
+ if constexpr (LOCK_TRACE_DEBUG) {
+ if (mode == Mode::Exclusive)
+ dbgln("Mutex::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
+ else
+ dbgln("Mutex::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
+ }
+
+ VERIFY(m_times_locked > 0);
+ if (!did_block) {
+ // If we didn't block, the lock must still be exclusive
+ VERIFY(m_mode == Mode::Exclusive);
+ m_times_locked++;
+ }
+
+#if LOCK_DEBUG
+ current_thread->holding_lock(*this, 1, location);
+#endif
+ return;
+ }
+ case Mode::Shared: {
+ VERIFY(!m_holder);
+ if (mode == Mode::Exclusive) {
+ if (m_shared_holders.size() == 1) {
+ auto it = m_shared_holders.begin();
+ if (it->key == current_thread) {
+ it->value++;
+ m_times_locked++;
+ m_mode = Mode::Exclusive;
+ m_holder = current_thread;
+ m_shared_holders.clear();
+ dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): acquire {}, converted shared to exclusive lock, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
+ return;
+ }
+ }
+
+ block(*current_thread, mode, lock, 1);
+ did_block = true;
+ VERIFY(m_mode == mode);
+ }
+
+ dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
+
+ VERIFY(m_times_locked > 0);
+ if (m_mode == Mode::Shared) {
+ VERIFY(!m_holder);
+ VERIFY(!did_block || m_shared_holders.contains(current_thread));
+ } else if (did_block) {
+ VERIFY(mode == Mode::Exclusive);
+ VERIFY(m_holder == current_thread);
+ VERIFY(m_shared_holders.is_empty());
+ }
+
+ if (!did_block) {
+ // If we didn't block, the lock must still be shared
+ VERIFY(m_mode == Mode::Shared);
+ m_times_locked++;
+ VERIFY(!m_shared_holders.is_empty());
+ auto it = m_shared_holders.find(current_thread);
+ if (it != m_shared_holders.end())
+ it->value++;
+ else
+ m_shared_holders.set(current_thread, 1);
+ }
+
+#if LOCK_DEBUG
+ current_thread->holding_lock(*this, 1, location);
+#endif
+ return;
+ }
+ default:
+ VERIFY_NOT_REACHED();
+ }
+}
+
+void Mutex::unlock()
+{
+ // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+ // and also from within critical sections!
+ VERIFY(!Processor::current().in_irq());
+ auto current_thread = Thread::current();
+ ScopedSpinLock lock(m_lock);
+ Mode current_mode = m_mode;
+ if constexpr (LOCK_TRACE_DEBUG) {
+ if (current_mode == Mode::Shared)
+ dbgln("Mutex::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
+ else
+ dbgln("Mutex::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
+ }
+
+ VERIFY(current_mode != Mode::Unlocked);
+
+ VERIFY(m_times_locked > 0);
+ m_times_locked--;
+
+ switch (current_mode) {
+ case Mode::Exclusive:
+ VERIFY(m_holder == current_thread);
+ VERIFY(m_shared_holders.is_empty());
+ if (m_times_locked == 0)
+ m_holder = nullptr;
+ break;
+ case Mode::Shared: {
+ VERIFY(!m_holder);
+ auto it = m_shared_holders.find(current_thread);
+ VERIFY(it != m_shared_holders.end());
+ if (it->value > 1) {
+ it->value--;
+ } else {
+ VERIFY(it->value > 0);
+ m_shared_holders.remove(it);
+ }
+ break;
+ }
+ default:
+ VERIFY_NOT_REACHED();
+ }
+
+#if LOCK_DEBUG
+ if (current_thread) {
+ current_thread->holding_lock(*this, -1, {});
+ }
+#endif
+
+ if (m_times_locked == 0) {
+ VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
+
+ m_mode = Mode::Unlocked;
+ unblock_waiters(current_mode);
+ }
+}
+
+void Mutex::block(Thread& current_thread, Mode mode, ScopedSpinLock<SpinLock<u8>>& lock, u32 requested_locks)
+{
+ auto& blocked_thread_list = thread_list_for_mode(mode);
+ VERIFY(!blocked_thread_list.contains(current_thread));
+ blocked_thread_list.append(current_thread);
+
+ dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waiting...", this, m_name);
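+ // NOTE: Thread::block() drops m_lock while we sleep and reacquires it
+ // before returning; by the time we resume, unblock_waiters() has already
+ // granted us the lock and updated m_mode accordingly.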
+ current_thread.block(*this, lock, requested_locks);
+ dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waited", this, m_name);
+
+ VERIFY(blocked_thread_list.contains(current_thread));
+ blocked_thread_list.remove(current_thread);
+}
+
+void Mutex::unblock_waiters(Mode previous_mode)
+{
+ VERIFY(m_times_locked == 0);
+ VERIFY(m_mode == Mode::Unlocked);
+
+ if (m_blocked_threads_list_exclusive.is_empty() && m_blocked_threads_list_shared.is_empty())
+ return;
+
+ auto unblock_shared = [&]() {
+ if (m_blocked_threads_list_shared.is_empty())
+ return false;
+ m_mode = Mode::Shared;
+ for (auto& thread : m_blocked_threads_list_shared) {
+ auto requested_locks = thread.unblock_from_lock(*this);
+ auto set_result = m_shared_holders.set(&thread, requested_locks);
+ VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
+ m_times_locked += requested_locks;
+ }
+ return true;
+ };
+ auto unblock_exclusive = [&]() {
+ if (auto* next_exclusive_thread = m_blocked_threads_list_exclusive.first()) {
+ m_mode = Mode::Exclusive;
+ m_times_locked = next_exclusive_thread->unblock_from_lock(*this);
+ m_holder = next_exclusive_thread;
+ return true;
+ }
+ return false;
+ };
+
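+ // Consider the class of waiters opposite to the mode we are coming out of
+ // first, so that neither class can starve the other. Note that
+ // unblock_shared() wakes all shared waiters at once, while
+ // unblock_exclusive() hands the lock to a single thread.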
+ if (previous_mode == Mode::Exclusive) {
+ if (!unblock_shared())
+ unblock_exclusive();
+ } else {
+ if (!unblock_exclusive())
+ unblock_shared();
+ }
+}
+
+auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
+{
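+ // This is used when the current thread must temporarily give up the mutex;
+ // the returned mode and the count written to lock_count_to_restore are later
+ // handed back to restore_lock(), typically via ScopedLockRelease.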
+ // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+ // and also from within critical sections!
+ VERIFY(!Processor::current().in_irq());
+ auto current_thread = Thread::current();
+ ScopedSpinLock lock(m_lock);
+ auto current_mode = m_mode;
+ switch (current_mode) {
+ case Mode::Exclusive: {
+ if (m_holder != current_thread) {
+ lock_count_to_restore = 0;
+ return Mode::Unlocked;
+ }
+
+ dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
+#if LOCK_DEBUG
+ m_holder->holding_lock(*this, -(int)m_times_locked, {});
+#endif
+ m_holder = nullptr;
+ VERIFY(m_times_locked > 0);
+ lock_count_to_restore = m_times_locked;
+ m_times_locked = 0;
+ m_mode = Mode::Unlocked;
+ unblock_waiters(Mode::Exclusive);
+ break;
+ }
+ case Mode::Shared: {
+ VERIFY(!m_holder);
+ auto it = m_shared_holders.find(current_thread);
+ if (it == m_shared_holders.end()) {
+ lock_count_to_restore = 0;
+ return Mode::Unlocked;
+ }
+
+ dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::force_unlock_if_locked @ {}: unlocking shared with lock count: {}, total locks: {}",
+ this, it->value, m_times_locked);
+
+ VERIFY(it->value > 0);
+ lock_count_to_restore = it->value;
+ VERIFY(lock_count_to_restore > 0);
+#if LOCK_DEBUG
+ current_thread->holding_lock(*this, -(int)lock_count_to_restore, {});
+#endif
+ m_shared_holders.remove(it);
+ VERIFY(m_times_locked >= lock_count_to_restore);
+ m_times_locked -= lock_count_to_restore;
+ if (m_times_locked == 0) {
+ m_mode = Mode::Unlocked;
+ unblock_waiters(Mode::Shared);
+ }
+ break;
+ }
+ case Mode::Unlocked: {
+ lock_count_to_restore = 0;
+ break;
+ }
+ default:
+ VERIFY_NOT_REACHED();
+ }
+ return current_mode;
+}
+
+#if LOCK_DEBUG
+void Mutex::restore_lock(Mode mode, u32 lock_count, const SourceLocation& location)
+#else
+void Mutex::restore_lock(Mode mode, u32 lock_count)
+#endif
+{
+ VERIFY(mode != Mode::Unlocked);
+ VERIFY(lock_count > 0);
+ VERIFY(!Processor::current().in_irq());
+ auto current_thread = Thread::current();
+ bool did_block = false;
+ ScopedSpinLock lock(m_lock);
+ switch (mode) {
+ case Mode::Exclusive: {
+ auto previous_mode = m_mode;
+ bool need_to_block = false;
+ if (m_mode == Mode::Exclusive && m_holder != current_thread)
+ need_to_block = true;
+ else if (m_mode == Mode::Shared && (m_shared_holders.size() != 1 || !m_shared_holders.contains(current_thread)))
+ need_to_block = true;
+ if (need_to_block) {
+ block(*current_thread, Mode::Exclusive, lock, lock_count);
+ did_block = true;
+ // If we blocked then m_mode should have been updated to what we requested
+ VERIFY(m_mode == Mode::Exclusive);
+ }
+
+ dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::restore_lock @ {}: restoring {} with lock count {}, was {}", this, mode_to_string(mode), lock_count, mode_to_string(previous_mode));
+
+ VERIFY(m_mode != Mode::Shared);
+ VERIFY(m_shared_holders.is_empty());
+ if (did_block) {
+ VERIFY(m_times_locked > 0);
+ VERIFY(m_holder == current_thread);
+ } else {
+ if (m_mode == Mode::Unlocked) {
+ m_mode = Mode::Exclusive;
+ VERIFY(m_times_locked == 0);
+ m_times_locked = lock_count;
+ VERIFY(!m_holder);
+ m_holder = current_thread;
+ } else if (m_mode == Mode::Shared) {
+ // Upgrade the shared lock to an exclusive lock
+ VERIFY(!m_holder);
+ VERIFY(m_shared_holders.size() == 1);
+ VERIFY(m_shared_holders.contains(current_thread));
+ m_mode = Mode::Exclusive;
+ m_holder = current_thread;
+ m_shared_holders.clear();
+ } else {
+ VERIFY(m_mode == Mode::Exclusive);
+ VERIFY(m_holder == current_thread);
+ VERIFY(m_times_locked > 0);
+ m_times_locked += lock_count;
+ }
+ }
+
+#if LOCK_DEBUG
+ m_holder->holding_lock(*this, (int)lock_count, location);
+#endif
+ return;
+ }
+ case Mode::Shared: {
+ auto previous_mode = m_mode;
+ if (m_mode == Mode::Exclusive && m_holder != current_thread) {
+ block(*current_thread, Mode::Shared, lock, lock_count);
+ did_block = true;
+ // If we blocked then m_mode should have been updated to what we requested
+ VERIFY(m_mode == Mode::Shared);
+ }
+
+ dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::restore_lock @ {}: restoring {} with lock count {}, was {}",
+ this, mode_to_string(mode), lock_count, mode_to_string(previous_mode));
+
+ VERIFY(m_mode == Mode::Exclusive ? m_holder == current_thread : !m_holder);
+ if (did_block) {
+ VERIFY(m_times_locked > 0);
+ VERIFY(m_shared_holders.contains(current_thread));
+ } else {
+ if (m_mode == Mode::Unlocked) {
+ m_mode = Mode::Shared;
+ m_times_locked += lock_count;
+ auto set_result = m_shared_holders.set(current_thread, lock_count);
+ // The lock was unlocked, so no one else can hold it; we must be the first entry
+ VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
+ } else if (m_mode == Mode::Shared) {
+ m_times_locked += lock_count;
+ if (auto it = m_shared_holders.find(current_thread); it != m_shared_holders.end()) {
+ it->value += lock_count;
+ } else {
+ auto set_result = m_shared_holders.set(current_thread, lock_count);
+ // There may be other shared lock holders already, but we should not have an entry yet
+ VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
+ }
+ } else {
+ VERIFY(m_mode == Mode::Exclusive);
+ VERIFY(m_holder == current_thread);
+ m_times_locked += lock_count;
+ }
+ }
+
+#if LOCK_DEBUG
+ current_thread->holding_lock(*this, (int)lock_count, location);
+#endif
+ return;
+ }
+ default:
+ VERIFY_NOT_REACHED();
+ }
+}
+
+}
diff --git a/Kernel/Locking/Mutex.h b/Kernel/Locking/Mutex.h
new file mode 100644
index 0000000000..28c826c1cd
--- /dev/null
+++ b/Kernel/Locking/Mutex.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Assertions.h>
+#include <AK/Atomic.h>
+#include <AK/HashMap.h>
+#include <AK/Types.h>
+#include <Kernel/Forward.h>
+#include <Kernel/Locking/LockMode.h>
+#include <Kernel/WaitQueue.h>
+
+namespace Kernel {
+
+class Mutex {
+ friend class Thread;
+
+ AK_MAKE_NONCOPYABLE(Mutex);
+ AK_MAKE_NONMOVABLE(Mutex);
+
+public:
+ using Mode = LockMode;
+
+ Mutex(const char* name = nullptr)
+ : m_name(name)
+ {
+ }
+ ~Mutex() = default;
+
+#if LOCK_DEBUG
+ void lock(Mode mode = Mode::Exclusive, const SourceLocation& location = SourceLocation::current());
+ void restore_lock(Mode, u32, const SourceLocation& location = SourceLocation::current());
+#else
+ void lock(Mode = Mode::Exclusive);
+ void restore_lock(Mode, u32);
+#endif
+
+ void unlock();
+ [[nodiscard]] Mode force_unlock_if_locked(u32&);
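+ // NOTE: is_locked() is only a snapshot; the state may change as soon as
+ // m_lock is released again, so use it for assertions and diagnostics only.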
+ [[nodiscard]] bool is_locked() const
+ {
+ ScopedSpinLock lock(m_lock);
+ return m_mode != Mode::Unlocked;
+ }
+ [[nodiscard]] bool own_lock() const
+ {
+ ScopedSpinLock lock(m_lock);
+ if (m_mode == Mode::Exclusive)
+ return m_holder == Thread::current();
+ if (m_mode == Mode::Shared)
+ return m_shared_holders.contains(Thread::current());
+ return false;
+ }
+
+ [[nodiscard]] const char* name() const { return m_name; }
+
+ static const char* mode_to_string(Mode mode)
+ {
+ switch (mode) {
+ case Mode::Unlocked:
+ return "unlocked";
+ case Mode::Exclusive:
+ return "exclusive";
+ case Mode::Shared:
+ return "shared";
+ default:
+ return "invalid";
+ }
+ }
+
+private:
+ typedef IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_blocked_threads_list_node> BlockedThreadList;
+
+ ALWAYS_INLINE BlockedThreadList& thread_list_for_mode(Mode mode)
+ {
+ VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
+ return mode == Mode::Exclusive ? m_blocked_threads_list_exclusive : m_blocked_threads_list_shared;
+ }
+
+ void block(Thread&, Mode, ScopedSpinLock<SpinLock<u8>>&, u32);
+ void unblock_waiters(Mode);
+
+ const char* m_name { nullptr };
+ Mode m_mode { Mode::Unlocked };
+
+ // When locked exclusively, only the thread already holding the lock can
+ // lock it again. When locked in shared mode, any thread can do that.
+ u32 m_times_locked { 0 };
+
+ // One of the threads that hold this lock, or nullptr. When locked in shared
+ // mode, this is stored on best effort basis: nullptr value does *not* mean
+ // the lock is unlocked, it just means we don't know which threads hold it.
+ // When locked exclusively, this is always the one thread that holds the
+ // lock.
+ RefPtr<Thread> m_holder;
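+ // In shared mode, maps each holding thread to the number of times it has
+ // (recursively) acquired the lock.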
+ HashMap<Thread*, u32> m_shared_holders;
+
+ BlockedThreadList m_blocked_threads_list_exclusive;
+ BlockedThreadList m_blocked_threads_list_shared;
+
+ mutable SpinLock<u8> m_lock;
+};
+
+class MutexLocker {
+ AK_MAKE_NONCOPYABLE(MutexLocker);
+
+public:
+ ALWAYS_INLINE explicit MutexLocker()
+ : m_lock(nullptr)
+ , m_locked(false)
+ {
+ }
+
+#if LOCK_DEBUG
+ ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
+#else
+ ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive)
+#endif
+ : m_lock(&l)
+ {
+#if LOCK_DEBUG
+ m_lock->lock(mode, location);
+#else
+ m_lock->lock(mode);
+#endif
+ }
+
+ ALWAYS_INLINE ~MutexLocker()
+ {
+ if (m_locked)
+ unlock();
+ }
+
+ ALWAYS_INLINE void unlock()
+ {
+ VERIFY(m_lock);
+ VERIFY(m_locked);
+ m_locked = false;
+ m_lock->unlock();
+ }
+
+#if LOCK_DEBUG
+ ALWAYS_INLINE void attach_and_lock(Mutex& lock, Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
+#else
+ ALWAYS_INLINE void attach_and_lock(Mutex& lock, Mutex::Mode mode = Mutex::Mode::Exclusive)
+#endif
+ {
+ VERIFY(!m_locked);
+ m_lock = &lock;
+ m_locked = true;
+
+#if LOCK_DEBUG
+ m_lock->lock(mode, location);
+#else
+ m_lock->lock(mode);
+#endif
+ }
+
+#if LOCK_DEBUG
+ ALWAYS_INLINE void lock(Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
+#else
+ ALWAYS_INLINE void lock(Mutex::Mode mode = Mutex::Mode::Exclusive)
+#endif
+ {
+ VERIFY(m_lock);
+ VERIFY(!m_locked);
+ m_locked = true;
+
+#if LOCK_DEBUG
+ m_lock->lock(mode, location);
+#else
+ m_lock->lock(mode);
+#endif
+ }
+
+private:
+ Mutex* m_lock;
+ bool m_locked { true };
+};
+
+template<typename T>
+class Lockable {
+public:
+ Lockable() = default;
+ Lockable(T&& resource)
+ : m_resource(move(resource))
+ {
+ }
+ [[nodiscard]] Mutex& lock() { return m_lock; }
+ [[nodiscard]] T& resource() { return m_resource; }
+
+ [[nodiscard]] T lock_and_copy()
+ {
+ MutexLocker locker(m_lock);
+ return m_resource;
+ }
+
+private:
+ T m_resource;
+ Mutex m_lock;
+};
+
+class ScopedLockRelease {
+ AK_MAKE_NONCOPYABLE(ScopedLockRelease);
+
+public:
+ ScopedLockRelease& operator=(ScopedLockRelease&&) = delete;
+
+ ScopedLockRelease(Mutex& lock)
+ : m_lock(&lock)
+ , m_previous_mode(lock.force_unlock_if_locked(m_previous_recursions))
+ {
+ }
+
+ ScopedLockRelease(ScopedLockRelease&& from)
+ : m_lock(exchange(from.m_lock, nullptr))
+ , m_previous_mode(exchange(from.m_previous_mode, Mutex::Mode::Unlocked))
+ , m_previous_recursions(exchange(from.m_previous_recursions, 0))
+ {
+ }
+
+ ~ScopedLockRelease()
+ {
+ if (m_lock && m_previous_mode != Mutex::Mode::Unlocked)
+ m_lock->restore_lock(m_previous_mode, m_previous_recursions);
+ }
+
+ void restore_lock()
+ {
+ VERIFY(m_lock);
+ if (m_previous_mode != Mutex::Mode::Unlocked) {
+ m_lock->restore_lock(m_previous_mode, m_previous_recursions);
+ m_previous_mode = Mutex::Mode::Unlocked;
+ m_previous_recursions = 0;
+ }
+ }
+
+ void do_not_restore()
+ {
+ VERIFY(m_lock);
+ m_previous_mode = Mutex::Mode::Unlocked;
+ m_previous_recursions = 0;
+ }
+
+private:
+ Mutex* m_lock;
+ Mutex::Mode m_previous_mode;
+ u32 m_previous_recursions;
+};
+
+}
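
For illustration, here is a minimal usage sketch of the API introduced by this
commit. It is not part of the commit itself; the names s_config, read_config
and update_config are hypothetical, chosen only to show Lockable<T> guarding
shared state with shared and exclusive acquisition.

#include <Kernel/Locking/Mutex.h>

namespace Kernel {

// Hypothetical shared state guarded by a Mutex via Lockable<T>.
static Lockable<int> s_config;

int read_config()
{
    // Shared mode: multiple readers may hold the mutex at the same time.
    MutexLocker locker(s_config.lock(), Mutex::Mode::Shared);
    return s_config.resource();
}

void update_config(int value)
{
    // Exclusive mode (the default): one holder at a time, recursion allowed.
    MutexLocker locker(s_config.lock());
    s_config.resource() = value;
}

}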