author     Andreas Kling <kling@serenityos.org>   2023-03-07 12:25:00 +0100
committer  Andreas Kling <kling@serenityos.org>   2023-03-09 21:54:59 +0100
commit     e6fc7b3ff7618cd88a03683eb5b2145c9932d562 (patch)
tree       a94f6090c00962ac0468a402de4d92977f813481 /Kernel/Library
parent     067d0689c5f7ff720fcfec9ff4d2898741948fed (diff)
Kernel: Switch LockRefPtr<Inode> to RefPtr<Inode>
The main place where this is a little iffy is in RAMFS where inodes have a LockWeakPtr to their parent inode. I've left that as a LockWeakPtr for now.
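
For context, a minimal sketch of the kind of change this commit makes in filesystem code (the class and member names below are illustrative, not taken from the patch):

    class ExampleFileSystem {
        // Before this commit, strong inode references went through the lock-guarded pointer:
        //     LockRefPtr<Inode> m_root_inode;
        // After this commit, a plain RefPtr is enough, since Inode is atomically ref-counted:
        RefPtr<Inode> m_root_inode;
    };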
Diffstat (limited to 'Kernel/Library')
-rw-r--r--   Kernel/Library/LockWeakPtr.h    14
-rw-r--r--   Kernel/Library/LockWeakable.h    6
2 files changed, 10 insertions, 10 deletions
diff --git a/Kernel/Library/LockWeakPtr.h b/Kernel/Library/LockWeakPtr.h
index f6bacbe6b7..f2fd204d94 100644
--- a/Kernel/Library/LockWeakPtr.h
+++ b/Kernel/Library/LockWeakPtr.h
@@ -143,7 +143,7 @@ public:
// use unsafe_ptr(), but as the name suggests, it is not safe...
LockRefPtr<T> ref;
// Using do_while_locked protects against a race with clear()!
- m_link.do_while_locked([&](WeakLink* link) {
+ m_link.do_while_locked([&](LockWeakLink* link) {
if (link)
ref = link->template strong_ref<T>();
});
@@ -153,7 +153,7 @@ public:
[[nodiscard]] T* unsafe_ptr() const
{
T* ptr = nullptr;
- m_link.do_while_locked([&](WeakLink* link) {
+ m_link.do_while_locked([&](LockWeakLink* link) {
if (link)
ptr = link->unsafe_ptr<T>();
});
@@ -165,15 +165,15 @@ public:
[[nodiscard]] bool is_null() const { return !m_link || m_link->is_null(); }
void clear() { m_link = nullptr; }
- [[nodiscard]] LockRefPtr<WeakLink> take_link() { return move(m_link); }
+ [[nodiscard]] LockRefPtr<LockWeakLink> take_link() { return move(m_link); }
private:
- LockWeakPtr(LockRefPtr<WeakLink> const& link)
+ LockWeakPtr(LockRefPtr<LockWeakLink> const& link)
: m_link(link)
{
}
- LockRefPtr<WeakLink> m_link;
+ LockRefPtr<LockWeakLink> m_link;
};
template<typename T>
@@ -196,10 +196,10 @@ inline ErrorOr<LockWeakPtr<U>> LockWeakable<T>::try_make_weak_ptr() const
return LockWeakPtr<U> {};
}
if (!m_link) {
- // There is a small chance that we create a new WeakLink and throw
+ // There is a small chance that we create a new LockWeakLink and throw
// it away because another thread beat us to it. But the window is
// pretty small and the overhead isn't terrible.
- m_link.assign_if_null(TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) WeakLink(const_cast<T&>(static_cast<T const&>(*this))))));
+ m_link.assign_if_null(TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) LockWeakLink(const_cast<T&>(static_cast<T const&>(*this))))));
}
LockWeakPtr<U> weak_ptr(m_link);
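
The hunk above only renames the link type inside try_make_weak_ptr(); the calling pattern is unchanged. Roughly, code on the consuming side of this API looks like the sketch below (Thing and do_something_with() are made up for illustration, and the upgrade helper is assumed to be strong_ref(), matching the first hunk in this file):

    class Thing final : public LockWeakable<Thing> {
    };

    ErrorOr<void> example(Thing& thing)
    {
        // The first call lazily allocates the shared LockWeakLink, so it can fail with ENOMEM.
        auto weak = TRY(thing.try_make_weak_ptr());
        // Upgrading goes through do_while_locked(), guarding against a concurrent clear().
        if (auto strong = weak.strong_ref())
            do_something_with(*strong);
        return {};
    }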
diff --git a/Kernel/Library/LockWeakable.h b/Kernel/Library/LockWeakable.h
index 5c21382e25..6c146e665b 100644
--- a/Kernel/Library/LockWeakable.h
+++ b/Kernel/Library/LockWeakable.h
@@ -21,7 +21,7 @@ class LockWeakable;
template<typename T>
class LockWeakPtr;
-class WeakLink final : public AtomicRefCounted<WeakLink> {
+class LockWeakLink final : public AtomicRefCounted<LockWeakLink> {
template<typename T>
friend class LockWeakable;
template<typename T>
@@ -82,7 +82,7 @@ public:
private:
template<typename T>
- explicit WeakLink(T& weakable)
+ explicit LockWeakLink(T& weakable)
: m_ptr(&weakable)
{
}
@@ -115,7 +115,7 @@ protected:
}
private:
- mutable LockRefPtr<WeakLink> m_link;
+ mutable LockRefPtr<LockWeakLink> m_link;
Atomic<bool> m_being_destroyed { false };
};
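
As for the RAMFS caveat in the commit message: parent back-references stay weak so that a child inode does not keep its parent alive, and they remain lock-protected for now. Schematically (ExampleInode and m_parent are illustrative, not the actual RAMFS code):

    class ExampleInode : public LockWeakable<ExampleInode> {
        // Weak back-reference: the child must not pin its parent,
        // and per the commit message it stays a LockWeakPtr for now.
        LockWeakPtr<ExampleInode> m_parent;
    };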