author     Andreas Kling <kling@serenityos.org>   2022-05-07 11:59:46 +0200
committer  Andreas Kling <kling@serenityos.org>   2022-06-15 17:15:04 +0200
commit     9e994da2acdec9807a82b3e3356876a2e1dd59b0
tree       5e190d14583508b7310e07526de78be5ffd3e0d5 /Kernel
parent     b47fbea9ad699b055861368f6712daf30c4970f9
Kernel+AK: Split Weakable.h into userspace and kernel variants
Only the kernel expects AK::Weakable to lock its refcount manipulation,
so let's not force userspace to pay for that as well.
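
For context, the sketch below shows how a Weakable class is typically consumed. It is illustrative only and not part of this commit: `Thing` and `example()` are hypothetical, and the AK helpers used (RefCounted, adopt_ref, MUST) are assumed to behave as they do elsewhere in the tree.

// Illustrative sketch only; `Thing` is hypothetical and not part of this commit.
#include <AK/RefCounted.h>
#include <AK/Try.h>
#include <AK/WeakPtr.h>

class Thing
    : public RefCounted<Thing>
    , public Weakable<Thing> {
};

void example()
{
    RefPtr<Thing> thing = adopt_ref(*new Thing);

    // try_make_weak_ptr() (declared in the header below) hands out a
    // WeakPtr that observes the object without bumping its refcount.
    auto weak = MUST(thing->try_make_weak_ptr());

    thing = nullptr; // Last strong ref dropped; ~Weakable() revokes the link.
    // From here on, any attempt to upgrade `weak` to a strong reference yields null.
}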
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Library/ThreadSafeWeakable.h  124
1 file changed, 124 insertions(+), 0 deletions(-)
diff --git a/Kernel/Library/ThreadSafeWeakable.h b/Kernel/Library/ThreadSafeWeakable.h
new file mode 100644
index 0000000000..3abbcea2d1
--- /dev/null
+++ b/Kernel/Library/ThreadSafeWeakable.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Assertions.h>
+#include <AK/Atomic.h>
+#include <AK/RefCounted.h>
+#include <AK/RefPtr.h>
+#include <AK/StdLibExtras.h>
+#include <Kernel/Arch/Processor.h>
+#include <Kernel/Arch/ScopedCritical.h>
+
+namespace AK {
+
+template<typename T>
+class Weakable;
+template<typename T>
+class WeakPtr;
+
+class WeakLink : public RefCounted<WeakLink> {
+    template<typename T>
+    friend class Weakable;
+    template<typename T>
+    friend class WeakPtr;
+
+public:
+    template<typename T, typename PtrTraits = RefPtrTraits<T>>
+    RefPtr<T, PtrTraits> strong_ref() const
+        requires(IsBaseOf<RefCountedBase, T>)
+    {
+        RefPtr<T, PtrTraits> ref;
+
+        {
+            // We don't want to be preempted while we are trying to obtain
+            // a strong reference
+            Kernel::ScopedCritical critical;
+            if (!(m_consumers.fetch_add(1u << 1, AK::MemoryOrder::memory_order_acquire) & 1u)) {
+                T* ptr = (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
+                if (ptr && ptr->try_ref())
+                    ref = adopt_ref(*ptr);
+            }
+            m_consumers.fetch_sub(1u << 1, AK::MemoryOrder::memory_order_release);
+        }
+
+        return ref;
+    }
+
+    template<typename T>
+    T* unsafe_ptr() const
+    {
+        if (m_consumers.load(AK::MemoryOrder::memory_order_relaxed) & 1u)
+            return nullptr;
+        // NOTE: This may return a non-null pointer even if revocation
+        // has been triggered as there is a possible race! But it's "unsafe"
+        // anyway because we return a raw pointer without ensuring a
+        // reference...
+        return (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
+    }
+
+    bool is_null() const
+    {
+        return unsafe_ptr<void>() == nullptr;
+    }
+
+    void revoke()
+    {
+        auto current_consumers = m_consumers.fetch_or(1u, AK::MemoryOrder::memory_order_relaxed);
+        VERIFY(!(current_consumers & 1u));
+        // We flagged revocation, now wait until everyone trying to obtain
+        // a strong reference is done
+        while (current_consumers > 0) {
+            Kernel::Processor::wait_check();
+            current_consumers = m_consumers.load(AK::MemoryOrder::memory_order_acquire) & ~1u;
+        }
+        // No one is trying to use it (anymore)
+        m_ptr.store(nullptr, AK::MemoryOrder::memory_order_release);
+    }
+
+private:
+    template<typename T>
+    explicit WeakLink(T& weakable)
+        : m_ptr(&weakable)
+    {
+    }
+    mutable Atomic<void*> m_ptr;
+    mutable Atomic<unsigned> m_consumers; // LSB indicates revocation in progress
+};
+
+template<typename T>
+class Weakable {
+private:
+    class Link;
+
+public:
+    template<typename U = T>
+    ErrorOr<WeakPtr<U>> try_make_weak_ptr() const;
+
+protected:
+    Weakable() = default;
+
+    ~Weakable()
+    {
+        m_being_destroyed.store(true, AK::MemoryOrder::memory_order_release);
+        revoke_weak_ptrs();
+    }
+
+    void revoke_weak_ptrs()
+    {
+        if (auto link = move(m_link))
+            link->revoke();
+    }
+
+private:
+    mutable RefPtr<WeakLink> m_link;
+    Atomic<bool> m_being_destroyed { false };
+};
+
+}
+
+using AK::Weakable;
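
The m_consumers handshake above is the subtle part: bit 0 is a revocation flag, and each in-flight strong_ref() attempt adds 2, so the reader count lives in the remaining bits. Below is a standalone restatement of that protocol using std::atomic, purely for illustration; the names and free-function shape are mine, not AK's, and the real code additionally holds a Kernel::ScopedCritical and calls try_ref() on the target object.

// Illustrative restatement of the m_consumers protocol; not AK code.
#include <atomic>
#include <cassert>

static std::atomic<unsigned> consumers { 0 }; // bit 0: revoked; higher bits: 2 * in-flight readers
static std::atomic<void*> object { nullptr };

void* try_acquire()
{
    void* result = nullptr;
    // Announce this reader by adding 2, leaving the revocation bit untouched.
    // If the bit was already set, revoke() has begun and we must not dereference.
    if (!(consumers.fetch_add(2, std::memory_order_acquire) & 1))
        result = object.load(std::memory_order_acquire);
    consumers.fetch_sub(2, std::memory_order_release);
    return result;
}

void revoke()
{
    // Set the revocation bit so no new reader can see the pointer...
    unsigned current = consumers.fetch_or(1, std::memory_order_relaxed);
    assert(!(current & 1)); // double revocation is a bug, mirroring the VERIFY above
    // ...then spin until every already-announced reader has subtracted itself.
    while (current > 0)
        current = consumers.load(std::memory_order_acquire) & ~1u;
    object.store(nullptr, std::memory_order_release);
}

This is also where the split in this commit pays off: the kernel version wraps the reader side in Kernel::ScopedCritical so a reader cannot be preempted while it holds the count up, and spins with Kernel::Processor::wait_check(), machinery the userspace variant no longer has to pay for.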