author     Andreas Kling <kling@serenityos.org>  2022-09-01 18:37:58 +0200
committer  Andreas Kling <kling@serenityos.org>  2022-09-03 00:36:25 +0200
commit     53c0038d2c44340cf8b3f123309afac9fbf964e1
tree       608e7377fe2b88b984aeca38ecf81737b7ac4a83  /AK/Weakable.h
parent     159f9688dc1e1c407f9aaf991c99ae8ee8a1e7cb
AK: Make Weakable non-atomic
Let's not punish single-threaded workloads with the performance cost of atomic weakables. The kernel keeps using LockWeakable.
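
For context, a minimal usage sketch of the pattern this class backs (the `Example` class and `demo()` are hypothetical; `Weakable`, `WeakPtr`, `make_weak_ptr()` and `strong_ref()` are the surrounding AK facilities). Observable behavior is unchanged by this patch; only the synchronization cost goes away:

    #include <AK/Assertions.h>
    #include <AK/RefCounted.h>
    #include <AK/RefPtr.h>
    #include <AK/WeakPtr.h>
    #include <AK/Weakable.h>

    // Hypothetical ref-counted, weakable class.
    class Example : public RefCounted<Example>
        , public Weakable<Example> {
    };

    void demo()
    {
        auto strong = adopt_ref(*new Example);
        auto weak = strong->make_weak_ptr();

        VERIFY(weak.strong_ref());  // Object alive: a strong ref can be taken.
        strong = nullptr;           // The last RefPtr dies, the object is
                                    // destroyed, and its WeakLink is revoked...
        VERIFY(!weak.strong_ref()); // ...so the weak pointer now yields null.
    }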
Diffstat (limited to 'AK/Weakable.h')
-rw-r--r--  AK/Weakable.h | 46
1 file changed, 6 insertions(+), 40 deletions(-)
diff --git a/AK/Weakable.h b/AK/Weakable.h
index 4dd3d8d813..1c90ba595b 100644
--- a/AK/Weakable.h
+++ b/AK/Weakable.h
@@ -7,11 +7,9 @@
 #pragma once
 
 #include <AK/Assertions.h>
-#include <AK/Atomic.h>
 #include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
 #include <AK/StdLibExtras.h>
-#include <sched.h>
 
 namespace AK {
@@ -31,50 +29,18 @@ public:
     RefPtr<T> strong_ref() const
         requires(IsBaseOf<RefCountedBase, T>)
     {
-        RefPtr<T> ref;
-
-        {
-            if (!(m_consumers.fetch_add(1u << 1, AK::MemoryOrder::memory_order_acquire) & 1u)) {
-                T* ptr = (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
-                if (ptr && ptr->try_ref())
-                    ref = adopt_ref(*ptr);
-            }
-            m_consumers.fetch_sub(1u << 1, AK::MemoryOrder::memory_order_release);
-        }
-
-        return ref;
+        return static_cast<T*>(m_ptr);
     }
 
     template<typename T>
     T* unsafe_ptr() const
     {
-        if (m_consumers.load(AK::MemoryOrder::memory_order_relaxed) & 1u)
-            return nullptr;
-        // NOTE: This may return a non-null pointer even if revocation
-        // has been triggered as there is a possible race! But it's "unsafe"
-        // anyway because we return a raw pointer without ensuring a
-        // reference...
-        return (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
+        return static_cast<T*>(m_ptr);
     }
 
-    bool is_null() const
-    {
-        return unsafe_ptr<void>() == nullptr;
-    }
+    bool is_null() const { return m_ptr == nullptr; }
 
-    void revoke()
-    {
-        auto current_consumers = m_consumers.fetch_or(1u, AK::MemoryOrder::memory_order_relaxed);
-        VERIFY(!(current_consumers & 1u));
-        // We flagged revocation, now wait until everyone trying to obtain
-        // a strong reference is done
-        while (current_consumers > 0) {
-            sched_yield();
-            current_consumers = m_consumers.load(AK::MemoryOrder::memory_order_acquire) & ~1u;
-        }
-        // No one is trying to use it (anymore)
-        m_ptr.store(nullptr, AK::MemoryOrder::memory_order_release);
-    }
+    void revoke() { m_ptr = nullptr; }
 
 private:
     template<typename T>
@@ -82,8 +48,8 @@ private:
     explicit WeakLink(T& weakable)
         : m_ptr(&weakable)
     {
     }
 
-    mutable Atomic<void*> m_ptr;
-    mutable Atomic<unsigned> m_consumers; // LSB indicates revocation in progress
+
+    mutable void* m_ptr { nullptr };
 };
 
 template<typename T>
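
For reference, the removed code above implemented a small lock-free revocation protocol: the low bit of m_consumers flags revocation, and the remaining bits count threads currently dereferencing the link. A standalone sketch of that scheme, rewritten here with std::atomic instead of AK::Atomic (type and member names hypothetical):

    #include <atomic>
    #include <cassert>
    #include <sched.h>

    struct AtomicLinkSketch {
        std::atomic<void*> m_ptr { nullptr };
        std::atomic<unsigned> m_consumers { 0 }; // LSB = revocation in progress

        void* try_acquire()
        {
            void* result = nullptr;
            // Register as a consumer: add 2 so the LSB flag is untouched.
            // If the previous value had the LSB set, revocation has begun
            // and we must not hand out the pointer.
            if (!(m_consumers.fetch_add(2u, std::memory_order_acquire) & 1u))
                result = m_ptr.load(std::memory_order_acquire);
            // Unregister as a consumer.
            m_consumers.fetch_sub(2u, std::memory_order_release);
            return result;
        }

        void revoke()
        {
            // Flag revocation (it must not already be in progress)...
            auto previous = m_consumers.fetch_or(1u, std::memory_order_relaxed);
            assert(!(previous & 1u));
            // ...then wait until every in-flight consumer has drained.
            while (m_consumers.load(std::memory_order_acquire) & ~1u)
                sched_yield();
            // No consumer can observe m_ptr anymore; clear it.
            m_ptr.store(nullptr, std::memory_order_release);
        }
    };

Because the consumer counter is shared by all readers, revoke() can spin for as long as any thread is inside try_acquire(); that spin, plus the acquire/release traffic on every strong_ref(), is the cost this patch removes for single-threaded userspace code.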