/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Atomic.h>
#include <AK/LogStream.h>
#include <AK/NonnullRefPtr.h>
#include <AK/StdLibExtras.h>
#include <AK/Traits.h>
#include <AK/Types.h>
#ifdef KERNEL
#    include <Kernel/Arch/i386/CPU.h>
#endif

namespace AK {

template<typename T>
class OwnPtr;

template<typename T>
struct RefPtrTraits {
    ALWAYS_INLINE static T* as_ptr(FlatPtr bits)
    {
        return (T*)(bits & ~(FlatPtr)1);
    }

    ALWAYS_INLINE static FlatPtr as_bits(T* ptr)
    {
        ASSERT(!((FlatPtr)ptr & 1));
        return (FlatPtr)ptr;
    }

    template<typename U, typename PtrTraits>
    ALWAYS_INLINE static FlatPtr convert_from(FlatPtr bits)
    {
        if (PtrTraits::is_null(bits))
            return default_null_value;
        return as_bits(PtrTraits::as_ptr(bits));
    }

    ALWAYS_INLINE static bool is_null(FlatPtr bits)
    {
        return !(bits & ~(FlatPtr)1);
    }

    ALWAYS_INLINE static FlatPtr exchange(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
    {
        // Only exchange while the lock is not held.
        ASSERT(!(new_value & 1));
        FlatPtr expected = atomic_var.load(AK::MemoryOrder::memory_order_relaxed);
        for (;;) {
            expected &= ~(FlatPtr)1; // only if the lock bit is not set
            if (atomic_var.compare_exchange_strong(expected, new_value, AK::MemoryOrder::memory_order_acq_rel))
                break;
#ifdef KERNEL
            Kernel::Processor::wait_check();
#endif
        }
        return expected;
    }

    ALWAYS_INLINE static bool exchange_if_null(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
    {
        // Only exchange while the lock is not held.
        ASSERT(!(new_value & 1));
        for (;;) {
            FlatPtr expected = default_null_value; // only if the lock bit is not set
            if (atomic_var.compare_exchange_strong(expected, new_value, AK::MemoryOrder::memory_order_acq_rel))
                break;
            if (!is_null(expected))
                return false;
#ifdef KERNEL
            Kernel::Processor::wait_check();
#endif
        }
        return true;
    }
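    // A note on the representation: the low bit of the stored bits doubles as
    // a tiny spinlock. T* is at least 2-byte aligned (as_bits() asserts this),
    // so bit 0 of a valid pointer value is always zero and can be borrowed as
    // a lock flag:
    //
    //     0b...ppppppp0 -> unlocked; the pointer is (bits & ~1)
    //     0b...ppppppp1 -> locked; another thread is updating the pointer
    //
    // lock() below spins until it observes the bit clear, and unlock()
    // publishes a new, unlocked value with release ordering.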
    ALWAYS_INLINE static FlatPtr lock(Atomic<FlatPtr>& atomic_var)
    {
        // This sets the lock bit atomically, preventing further modifications.
        // This is important when e.g. copying a RefPtr where the source
        // might be released and freed too quickly. This allows us
        // to temporarily lock the pointer so we can add a reference, then
        // unlock it.
        FlatPtr bits;
        for (;;) {
            bits = atomic_var.fetch_or(1, AK::MemoryOrder::memory_order_acq_rel);
            if (!(bits & 1))
                break;
#ifdef KERNEL
            Kernel::Processor::wait_check();
#endif
        }
        ASSERT(!(bits & 1));
        return bits;
    }

    ALWAYS_INLINE static void unlock(Atomic<FlatPtr>& atomic_var, FlatPtr new_value)
    {
        ASSERT(!(new_value & 1));
        atomic_var.store(new_value, AK::MemoryOrder::memory_order_release);
    }

    static constexpr FlatPtr default_null_value = 0;

    using NullType = std::nullptr_t;
};

template<typename T, typename PtrTraits>
class RefPtr {
    template<typename U, typename P>
    friend class RefPtr;
    template<typename U>
    friend class WeakPtr;

public:
    enum AdoptTag {
        Adopt
    };

    RefPtr() = default;

    RefPtr(const T* ptr)
        : m_bits(PtrTraits::as_bits(const_cast<T*>(ptr)))
    {
        ref_if_not_null(const_cast<T*>(ptr));
    }

    RefPtr(const T& object)
        : m_bits(PtrTraits::as_bits(const_cast<T*>(&object)))
    {
        T* ptr = const_cast<T*>(&object);
        ASSERT(ptr);
        ASSERT(!is_null());
        ptr->ref();
    }

    RefPtr(AdoptTag, T& object)
        : m_bits(PtrTraits::as_bits(&object))
    {
        ASSERT(!is_null());
    }

    RefPtr(RefPtr&& other)
        : m_bits(other.leak_ref_raw())
    {
    }

    ALWAYS_INLINE RefPtr(const NonnullRefPtr<T>& other)
        : m_bits(PtrTraits::as_bits(const_cast<T*>(other.add_ref())))
    {
    }

    template<typename U>
    ALWAYS_INLINE RefPtr(const NonnullRefPtr<U>& other)
        : m_bits(PtrTraits::as_bits(const_cast<U*>(other.add_ref())))
    {
    }

    template<typename U>
    ALWAYS_INLINE RefPtr(NonnullRefPtr<U>&& other)
        : m_bits(PtrTraits::as_bits(&other.leak_ref()))
    {
        ASSERT(!is_null());
    }

    template<typename U, typename P = RefPtrTraits<U>>
    RefPtr(RefPtr<U, P>&& other)
        : m_bits(PtrTraits::template convert_from<U, P>(other.leak_ref_raw()))
    {
    }

    RefPtr(const RefPtr& other)
        : m_bits(other.add_ref_raw())
    {
    }

    template<typename U, typename P = RefPtrTraits<U>>
    RefPtr(const RefPtr<U, P>& other)
        : m_bits(other.add_ref_raw())
    {
    }

    ALWAYS_INLINE ~RefPtr()
    {
        clear();
#ifdef SANITIZE_PTRS
        if constexpr (sizeof(T*) == 8)
            m_bits.store(0xe0e0e0e0e0e0e0e0, AK::MemoryOrder::memory_order_relaxed);
        else
            m_bits.store(0xe0e0e0e0, AK::MemoryOrder::memory_order_relaxed);
#endif
    }

    template<typename U>
    RefPtr(const OwnPtr<U>&) = delete;
    template<typename U>
    RefPtr& operator=(const OwnPtr<U>&) = delete;

    void swap(RefPtr& other)
    {
        if (this == &other)
            return;

        // NOTE: swap is not atomic!
        FlatPtr other_bits = PtrTraits::exchange(other.m_bits, PtrTraits::default_null_value);
        FlatPtr bits = PtrTraits::exchange(m_bits, other_bits);
        PtrTraits::exchange(other.m_bits, bits);
    }
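    // The overload below swaps across compatible RefPtr instantiations
    // (e.g. a RefPtr<Derived> with a RefPtr<Base, OtherTraits>), translating
    // the bit representation through each side's traits. As above, each
    // exchange is atomic on its own, but the swap as a whole is not.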
    template<typename U, typename P = RefPtrTraits<U>>
    void swap(RefPtr<U, P>& other)
    {
        // NOTE: swap is not atomic!
        FlatPtr other_bits = P::exchange(other.m_bits, P::default_null_value);
        FlatPtr bits = PtrTraits::exchange(m_bits, PtrTraits::template convert_from<U, P>(other_bits));
        P::exchange(other.m_bits, P::template convert_from<T, PtrTraits>(bits));
    }

    ALWAYS_INLINE RefPtr& operator=(RefPtr&& other)
    {
        if (this != &other)
            assign_raw(other.leak_ref_raw());
        return *this;
    }

    template<typename U, typename P = RefPtrTraits<U>>
    ALWAYS_INLINE RefPtr& operator=(RefPtr<U, P>&& other)
    {
        assign_raw(PtrTraits::template convert_from<U, P>(other.leak_ref_raw()));
        return *this;
    }

    template<typename U>
    ALWAYS_INLINE RefPtr& operator=(NonnullRefPtr<U>&& other)
    {
        assign_raw(PtrTraits::as_bits(&other.leak_ref()));
        return *this;
    }

    ALWAYS_INLINE RefPtr& operator=(const NonnullRefPtr<T>& other)
    {
        assign_raw(PtrTraits::as_bits(other.add_ref()));
        return *this;
    }

    template<typename U>
    ALWAYS_INLINE RefPtr& operator=(const NonnullRefPtr<U>& other)
    {
        assign_raw(PtrTraits::as_bits(other.add_ref()));
        return *this;
    }

    ALWAYS_INLINE RefPtr& operator=(const RefPtr& other)
    {
        if (this != &other)
            assign_raw(other.add_ref_raw());
        return *this;
    }

    template<typename U>
    ALWAYS_INLINE RefPtr& operator=(const RefPtr<U>& other)
    {
        assign_raw(other.add_ref_raw());
        return *this;
    }

    ALWAYS_INLINE RefPtr& operator=(const T* ptr)
    {
        ref_if_not_null(const_cast<T*>(ptr));
        assign_raw(PtrTraits::as_bits(const_cast<T*>(ptr)));
        return *this;
    }

    ALWAYS_INLINE RefPtr& operator=(const T& object)
    {
        const_cast<T&>(object).ref();
        assign_raw(PtrTraits::as_bits(const_cast<T*>(&object)));
        return *this;
    }

    RefPtr& operator=(std::nullptr_t)
    {
        clear();
        return *this;
    }

    ALWAYS_INLINE bool assign_if_null(RefPtr&& other)
    {
        if (this == &other)
            return is_null();
        return PtrTraits::exchange_if_null(m_bits, other.leak_ref_raw());
    }

    template<typename U, typename P = RefPtrTraits<U>>
    ALWAYS_INLINE bool assign_if_null(RefPtr<U, P>&& other)
    {
        if (this == &other)
            return is_null();
        return PtrTraits::exchange_if_null(m_bits, PtrTraits::template convert_from<U, P>(other.leak_ref_raw()));
    }

    ALWAYS_INLINE void clear()
    {
        assign_raw(PtrTraits::default_null_value);
    }

    bool operator!() const { return PtrTraits::is_null(m_bits.load(AK::MemoryOrder::memory_order_relaxed)); }

    [[nodiscard]] T* leak_ref()
    {
        FlatPtr bits = PtrTraits::exchange(m_bits, PtrTraits::default_null_value);
        return PtrTraits::as_ptr(bits);
    }

    NonnullRefPtr<T> release_nonnull()
    {
        FlatPtr bits = PtrTraits::exchange(m_bits, PtrTraits::default_null_value);
        ASSERT(!PtrTraits::is_null(bits));
        return NonnullRefPtr<T>(NonnullRefPtr<T>::Adopt, *PtrTraits::as_ptr(bits));
    }

    ALWAYS_INLINE T* ptr() { return as_ptr(); }
    ALWAYS_INLINE const T* ptr() const { return as_ptr(); }

    ALWAYS_INLINE T* operator->()
    {
        return as_nonnull_ptr();
    }

    ALWAYS_INLINE const T* operator->() const
    {
        return as_nonnull_ptr();
    }

    ALWAYS_INLINE T& operator*()
    {
        return *as_nonnull_ptr();
    }

    ALWAYS_INLINE const T& operator*() const
    {
        return *as_nonnull_ptr();
    }

    ALWAYS_INLINE operator const T*() const { return as_ptr(); }
    ALWAYS_INLINE operator T*() { return as_ptr(); }

    ALWAYS_INLINE operator bool() { return !is_null(); }

    bool operator==(std::nullptr_t) const { return is_null(); }
    bool operator!=(std::nullptr_t) const { return !is_null(); }

    bool operator==(const RefPtr& other) const { return as_ptr() == other.as_ptr(); }
    bool operator!=(const RefPtr& other) const { return as_ptr() != other.as_ptr(); }

    bool operator==(RefPtr& other) { return as_ptr() == other.as_ptr(); }
    bool operator!=(RefPtr& other) { return as_ptr() != other.as_ptr(); }

    bool operator==(const T* other) const { return as_ptr() == other; }
    bool operator!=(const T* other) const { return as_ptr() != other; }

    bool operator==(T* other) { return as_ptr() == other; }
    bool operator!=(T* other) { return as_ptr() != other; }
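    // NOTE: The comparisons above are shallow: they compare the stored
    // pointers for identity and never look at the pointees.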
    ALWAYS_INLINE bool is_null() const { return PtrTraits::is_null(m_bits.load(AK::MemoryOrder::memory_order_relaxed)); }

    template<typename U = T, typename EnableIf<IsSame<U, T>::value && !IsNullPointer<typename PtrTraits::NullType>::value>::Type* = nullptr>
    typename PtrTraits::NullType null_value() const
    {
        // Make sure we are holding a null value.
        FlatPtr bits = m_bits.load(AK::MemoryOrder::memory_order_relaxed);
        ASSERT(PtrTraits::is_null(bits));
        return PtrTraits::to_null_value(bits);
    }

    template<typename U = T, typename EnableIf<IsSame<U, T>::value && !IsNullPointer<typename PtrTraits::NullType>::value>::Type* = nullptr>
    void set_null_value(typename PtrTraits::NullType value)
    {
        // Make sure that the new null value would be interpreted as a null value.
        FlatPtr bits = PtrTraits::from_null_value(value);
        ASSERT(PtrTraits::is_null(bits));
        assign_raw(bits);
    }

private:
    template<typename F>
    void do_while_locked(F f) const
    {
#ifdef KERNEL
        // We don't want to be pre-empted while we have the lock bit set.
        Kernel::ScopedCritical critical;
#endif
        FlatPtr bits = PtrTraits::lock(m_bits);
        T* ptr = PtrTraits::as_ptr(bits);
        f(ptr);
        PtrTraits::unlock(m_bits, bits);
    }

    [[nodiscard]] ALWAYS_INLINE FlatPtr leak_ref_raw()
    {
        return PtrTraits::exchange(m_bits, PtrTraits::default_null_value);
    }

    [[nodiscard]] ALWAYS_INLINE FlatPtr add_ref_raw() const
    {
#ifdef KERNEL
        // We don't want to be pre-empted while we have the lock bit set.
        Kernel::ScopedCritical critical;
#endif
        // This prevents a race condition between threads A and B:
        // 1. Thread A copies the RefPtr, e.g. through assignment or the copy
        //    constructor, gets the pointer from the source, but is pre-empted
        //    before adding another reference.
        // 2. Thread B calls clear, leak_ref, or release_nonnull on the source,
        //    and then drops the last reference, causing the object to be deleted.
        // 3. Thread A finishes step #1 by attempting to add a reference to
        //    the object that was already deleted in step #2.
        FlatPtr bits = PtrTraits::lock(m_bits);
        if (T* ptr = PtrTraits::as_ptr(bits))
            ptr->ref();
        PtrTraits::unlock(m_bits, bits);
        return bits;
    }

    ALWAYS_INLINE void assign_raw(FlatPtr bits)
    {
        FlatPtr prev_bits = PtrTraits::exchange(m_bits, bits);
        unref_if_not_null(PtrTraits::as_ptr(prev_bits));
    }

    ALWAYS_INLINE T* as_ptr() const
    {
        return PtrTraits::as_ptr(m_bits.load(AK::MemoryOrder::memory_order_relaxed));
    }

    ALWAYS_INLINE T* as_nonnull_ptr() const
    {
        return as_nonnull_ptr(m_bits.load(AK::MemoryOrder::memory_order_relaxed));
    }

    ALWAYS_INLINE T* as_nonnull_ptr(FlatPtr bits) const
    {
        ASSERT(!PtrTraits::is_null(bits));
        return PtrTraits::as_ptr(bits);
    }

    mutable Atomic<FlatPtr> m_bits { PtrTraits::default_null_value };
};

template<typename T, typename PtrTraits = RefPtrTraits<T>>
inline const LogStream& operator<<(const LogStream& stream, const RefPtr<T, PtrTraits>& value)
{
    return stream << value.ptr();
}

template<typename T>
struct Traits<RefPtr<T>> : public GenericTraits<RefPtr<T>> {
    using PeekType = const T*;
    static unsigned hash(const RefPtr<T>& p) { return ptr_hash(p.ptr()); }
    static bool equals(const RefPtr<T>& a, const RefPtr<T>& b) { return a.ptr() == b.ptr(); }
};

template<typename T, typename U>
inline NonnullRefPtr<T> static_ptr_cast(const NonnullRefPtr<U>& ptr)
{
    return NonnullRefPtr<T>(static_cast<const T&>(*ptr));
}

template<typename T, typename U, typename PtrTraits = RefPtrTraits<T>>
inline RefPtr<T, PtrTraits> static_ptr_cast(const RefPtr<U>& ptr)
{
    return RefPtr<T, PtrTraits>(static_cast<const T*>(ptr.ptr()));
}

template<typename T, typename PtrTraits>
inline void swap(RefPtr<T, PtrTraits>& a, RefPtr<T, PtrTraits>& b)
{
    a.swap(b);
}

}

using AK::RefPtr;
using AK::static_ptr_cast;
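// Usage sketch (illustrative only; `Thing` is a hypothetical RefCounted type):
//
//     class Thing : public RefCounted<Thing> {
//     public:
//         static NonnullRefPtr<Thing> create() { return adopt(*new Thing); }
//     };
//
//     RefPtr<Thing> thing = Thing::create(); // holds one reference
//     RefPtr<Thing> alias = thing;           // ref count is now 2
//     thing = nullptr;                       // drops one reference
//     if (alias)
//         NonnullRefPtr<Thing> nonnull = alias.release_nonnull();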