| author | Gunnar Beutner <gbeutner@serenityos.org> | 2021-06-04 12:59:53 +0200 |
|---|---|---|
| committer | Andreas Kling <kling@serenityos.org> | 2021-06-04 19:32:25 +0200 |
| commit | 44418cb35138ce2af09ec2945a4ae8c9cb816feb (patch) | |
| tree | 0d87875ffe5d75d96f73c0c9b468e3a0c4106d5f /AK | |
| parent | fab9b2f068cb324e38b1f22e95ffa424d93da443 (diff) | |
| download | serenity-44418cb35138ce2af09ec2945a4ae8c9cb816feb.zip | |
AK: Allow deferring clear() for the Function class
Ideally, a Function should not be clear()ed while it's running.
Unfortunately, a number of callers do just that, and identifying all of
them would require inspecting every call site of operator() and clear().
Instead, this adds support for deferring clear() until after the
function has finished executing.
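To illustrate the pattern this change makes safe, here is a minimal sketch (not from the patch): a hypothetical `Timer` type whose one-shot callback disarms itself from inside its own invocation. It assumes AK's `Function`, where assigning `nullptr` routes through `clear()`.

```cpp
// Hypothetical illustration, not part of this commit. "Timer" stands in for
// the kinds of callers described above.
#include <AK/Function.h>

struct Timer {
    Function<void()> on_timeout;

    void fire()
    {
        if (on_timeout)
            on_timeout(); // operator() bumps m_call_nesting_level
    }
};

int main()
{
    Timer timer;
    timer.on_timeout = [&timer] {
        // A one-shot callback that disarms itself from inside its own
        // invocation. This reaches clear() while m_call_nesting_level > 0,
        // so the actual teardown is deferred until operator() returns.
        timer.on_timeout = nullptr;
    };
    timer.fire(); // safe: the callable now outlives its own execution
    return 0;
}
```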
Diffstat (limited to 'AK')
-rw-r--r-- | AK/Function.h | 15 |
1 file changed, 12 insertions(+), 3 deletions(-)
```diff
diff --git a/AK/Function.h b/AK/Function.h
index 5e4f639966..85ee9de9b1 100644
--- a/AK/Function.h
+++ b/AK/Function.h
@@ -48,7 +48,7 @@ public:
 
     ~Function()
     {
-        clear();
+        clear(false);
     }
 
     template<typename CallableType, class = typename EnableIf<!(IsPointer<CallableType> && IsFunction<RemovePointer<CallableType>>)&&IsRvalueReference<CallableType&&>>::Type>
@@ -74,7 +74,8 @@ public:
         VERIFY(wrapper);
         ++m_call_nesting_level;
         ScopeGuard guard([this] {
-            --m_call_nesting_level;
+            if (--m_call_nesting_level == 0 && m_deferred_clear)
+                const_cast<Function*>(this)->clear(false);
         });
         return wrapper->call(forward<In>(in)...);
     }
@@ -181,8 +182,15 @@ private:
         }
     }
 
-    void clear()
+    void clear(bool may_defer = true)
     {
+        bool called_from_inside_function = m_call_nesting_level > 0;
+        VERIFY(may_defer || !called_from_inside_function);
+        if (called_from_inside_function && may_defer) {
+            m_deferred_clear = true;
+            return;
+        }
+        m_deferred_clear = false;
         auto* wrapper = callable_wrapper();
         if (m_kind == FunctionKind::Inline) {
             VERIFY(wrapper);
@@ -229,6 +237,7 @@ private:
     }
 
     FunctionKind m_kind { FunctionKind::NullPointer };
+    bool m_deferred_clear { false };
     mutable Atomic<u16> m_call_nesting_level { 0 };
     // Empirically determined to fit most lambdas and functions.
     static constexpr size_t inline_capacity = 4 * sizeof(void*);
```
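Note the asymmetry in the patch: the destructor now calls `clear(false)`, and `clear()` VERIFYs that deferral is permitted, so destroying a `Function` that is still executing remains a hard failure. Deferral only helps when the `Function` object outlives the call; if the object itself is going away mid-call, there is no safe point left at which the deferred teardown could run.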