diff options
author | Andreas Kling <kling@serenityos.org> | 2022-08-19 20:53:40 +0200 |
---|---|---|
committer | Andreas Kling <kling@serenityos.org> | 2022-08-20 17:20:43 +0200 |
commit | 11eee67b8510767d76fb4793e3b62ac1793dd723 (patch) | |
tree | 8ce47a3813ce74bba56c60f62b29bdd6cdf287da /Kernel/Library/LockWeakable.h | |
parent | e475263113387404e63cdc3666391934604eb6e7 (diff) | |
download | serenity-11eee67b8510767d76fb4793e3b62ac1793dd723.zip |
Kernel: Make self-contained locking smart pointers their own classes
Until now, our kernel has reimplemented a number of AK classes to
provide automatic internal locking:
- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable
This patch renames the Kernel classes so that they can coexist with
the original AK classes:
- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable
The goal here is to eventually get rid of the Lock* classes in favor of
using external locking.
Diffstat (limited to 'Kernel/Library/LockWeakable.h')
-rw-r--r-- | Kernel/Library/LockWeakable.h | 124 |
1 file changed, 124 insertions, 0 deletions
/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/AtomicRefCounted.h>
#include <AK/StdLibExtras.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/ScopedCritical.h>
#include <Kernel/Library/LockRefPtr.h>

namespace AK {

template<typename T>
class LockWeakable;
template<typename T>
class LockWeakPtr;

// Shared, ref-counted link between a LockWeakable object and all LockWeakPtrs
// that observe it. The link outlives the observed object; once the object dies,
// the link's pointer is revoked (set to null) so stale weak pointers resolve to
// nothing.
//
// Synchronization protocol (see m_consumers):
//   - The least significant bit of m_consumers is the "revocation in progress /
//     revoked" flag.
//   - The remaining (upper) bits count threads currently inside strong_ref(),
//     which is why consumers add/subtract 1u << 1 rather than 1.
class WeakLink final : public AtomicRefCounted<WeakLink> {
    template<typename T>
    friend class LockWeakable;
    template<typename T>
    friend class LockWeakPtr;

public:
    // Attempt to promote this weak link to a strong LockRefPtr<T>.
    // Returns a null LockRefPtr if the target has been revoked (destroyed)
    // or if its ref-count could not be bumped (try_ref() failed).
    template<typename T, typename PtrTraits = LockRefPtrTraits<T>>
    LockRefPtr<T, PtrTraits> strong_ref() const
        requires(IsBaseOf<AtomicRefCountedBase, T>)
    {
        LockRefPtr<T, PtrTraits> ref;

        {
            // We don't want to be preempted while we are trying to obtain
            // a strong reference
            Kernel::ScopedCritical critical;
            // Register ourselves as a consumer (add 1u << 1, keeping the LSB
            // revocation flag untouched). If the flag was already set, a
            // revocation is underway (or done) and we must not touch the ptr.
            if (!(m_consumers.fetch_add(1u << 1, AK::MemoryOrder::memory_order_acquire) & 1u)) {
                T* ptr = (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
                // try_ref() can fail if the object's ref-count already hit
                // zero, in which case the target is being torn down.
                if (ptr && ptr->try_ref())
                    ref = adopt_lock_ref(*ptr);
            }
            // Deregister as a consumer; release ordering publishes our access
            // to any revoker spinning in revoke().
            m_consumers.fetch_sub(1u << 1, AK::MemoryOrder::memory_order_release);
        }

        return ref;
    }

    // Return the raw target pointer without taking any reference, or nullptr
    // once revocation has been flagged.
    template<typename T>
    T* unsafe_ptr() const
    {
        if (m_consumers.load(AK::MemoryOrder::memory_order_relaxed) & 1u)
            return nullptr;
        // NOTE: This may return a non-null pointer even if revocation
        // has been triggered as there is a possible race! But it's "unsafe"
        // anyway because we return a raw pointer without ensuring a
        // reference...
        return (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
    }

    // True once the link no longer points at a live object (or revocation has
    // been flagged). Subject to the same race as unsafe_ptr().
    bool is_null() const
    {
        return unsafe_ptr<void>() == nullptr;
    }

    // Revoke the link: flag revocation (set the LSB), spin until all in-flight
    // strong_ref() callers have drained, then clear the pointer. Must only be
    // called once per link — the VERIFY asserts the flag was not already set.
    void revoke()
    {
        auto current_consumers = m_consumers.fetch_or(1u, AK::MemoryOrder::memory_order_relaxed);
        VERIFY(!(current_consumers & 1u));
        // We flagged revocation, now wait until everyone trying to obtain
        // a strong reference is done
        while (current_consumers > 0) {
            // Busy-wait politely (wait_check() lets the processor service
            // other work while spinning); mask off the revocation bit so only
            // the consumer count is compared against zero.
            Kernel::Processor::wait_check();
            current_consumers = m_consumers.load(AK::MemoryOrder::memory_order_acquire) & ~1u;
        }
        // No one is trying to use it (anymore)
        m_ptr.store(nullptr, AK::MemoryOrder::memory_order_release);
    }

private:
    // Only the observed object (via LockWeakable) constructs a WeakLink.
    template<typename T>
    explicit WeakLink(T& weakable)
        : m_ptr(&weakable)
    {
    }
    // Raw pointer to the observed object; nulled out by revoke().
    mutable Atomic<void*> m_ptr;
    mutable Atomic<unsigned> m_consumers; // LSB indicates revocation in progress
};

// CRTP-style base class: deriving from LockWeakable<T> makes T observable via
// LockWeakPtr<T>. The destructor revokes all outstanding weak pointers, so
// they can never dangle.
template<typename T>
class LockWeakable {
private:
    class Link;

public:
    // Create a weak pointer to this object. Defined out-of-line (not visible
    // in this header); presumably allocates m_link lazily, hence the ErrorOr —
    // TODO confirm against the implementation.
    template<typename U = T>
    ErrorOr<LockWeakPtr<U>> try_make_weak_ptr() const;

protected:
    LockWeakable() = default;

    ~LockWeakable()
    {
        // Publish the destruction flag before revoking, so observers that
        // check m_being_destroyed see it ordered ahead of the revocation.
        m_being_destroyed.store(true, AK::MemoryOrder::memory_order_release);
        revoke_weak_ptrs();
    }

    // Detach and revoke the shared link (if any); after this, every
    // outstanding LockWeakPtr to this object resolves to null.
    void revoke_weak_ptrs()
    {
        if (auto link = move(m_link))
            link->revoke();
    }

private:
    // Lazily-created link shared with all LockWeakPtrs observing this object.
    mutable LockRefPtr<WeakLink> m_link;
    Atomic<bool> m_being_destroyed { false };
};

}

using AK::LockWeakable;