author    | Nicholas Baron <nicholas.baron.ten@gmail.com> | 2021-05-16 02:36:52 -0700
committer | GitHub <noreply@github.com>                   | 2021-05-16 10:36:52 +0100
commit    | aa4d41fe2c473c3bb78327a1dbe8ec85530259ca (patch)
tree      | 925d408b37ab1f7750a3af37adfb2949fcafa836 /Kernel/VM
parent    | bbaa4630323c20e37e2a0ead478987cb5f02fc53 (diff)
download  | serenity-aa4d41fe2c473c3bb78327a1dbe8ec85530259ca.zip
AK+Kernel+LibELF: Remove the need for `IteratorDecision::Continue`
By constraining the two implementations, the compiler will select the best-fitting one. All this requires is duplicating the implementation and simplifying it for the `void` case.
This constraining also informs both the caller and the compiler, because the callback's parameter types are spelled out as part of the constraint
(e.g. `IteratorFunction<int>`).
Some `for_each` functions in LibELF now accept only callbacks that return
`void`. This is a minimal correctness check, as it removes one way for a
callback to stop the iteration early and leave its work incomplete.
There also seems to be a possible idiom here: inside such a lambda, a bare
`return;` behaves like `continue;` in a for-loop.
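
To make the overload-selection idiom concrete, here is a minimal, self-contained C++20 sketch (not SerenityOS code): the `IteratorFunction`/`VoidFunction` concepts and the `Range` type below are simplified stand-ins modelled on their AK counterparts, and `for_each_range` is a hypothetical iterator written in the style of the functions changed by this commit.

```cpp
#include <concepts>
#include <cstddef>
#include <cstdio>
#include <vector>

enum class IterationDecision { Continue, Break };

// Simplified stand-ins for AK's IteratorFunction/VoidFunction concepts:
// satisfied when the callable accepts Args... and returns the named type.
template<typename Func, typename... Args>
concept IteratorFunction = requires(Func f, Args... args) {
    { f(args...) } -> std::same_as<IterationDecision>;
};

template<typename Func, typename... Args>
concept VoidFunction = requires(Func f, Args... args) {
    { f(args...) } -> std::same_as<void>;
};

struct Range { std::size_t base { 0 }; std::size_t count { 0 }; };

static std::vector<Range> s_ranges { { 0, 4 }, { 8, 2 } };

// Primary implementation: the callback may stop iteration early.
template<IteratorFunction<const Range&> F>
IterationDecision for_each_range(F f)
{
    for (auto const& range : s_ranges) {
        if (f(range) == IterationDecision::Break)
            return IterationDecision::Break;
    }
    return IterationDecision::Continue;
}

// Overload chosen for void-returning callbacks: it wraps the callback and
// always continues, so the caller no longer has to write
// `return IterationDecision::Continue;` by hand.
template<VoidFunction<const Range&> F>
IterationDecision for_each_range(F f)
{
    return for_each_range([&](const Range& range) {
        f(range);
        return IterationDecision::Continue;
    });
}

int main()
{
    std::size_t total = 0;
    // This void-returning lambda selects the VoidFunction overload.
    // A bare `return;` inside it acts like `continue;` in a for-loop.
    for_each_range([&](const Range& range) {
        if (range.count == 0)
            return; // skip empty ranges and keep iterating
        total += range.count;
    });
    std::printf("total pages: %zu\n", total);
}
```

In the header changes below, the `void` overloads mostly delegate to the `IterationDecision` ones in exactly this way, so there is still only one real loop to maintain per iterator.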
Diffstat (limited to 'Kernel/VM')
-rw-r--r-- | Kernel/VM/AnonymousVMObject.cpp | 14
-rw-r--r-- | Kernel/VM/AnonymousVMObject.h   | 26
-rw-r--r-- | Kernel/VM/MemoryManager.h       | 10
3 files changed, 36 insertions, 14 deletions
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 912e4cb275..576c040ae5 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -23,13 +23,11 @@ RefPtr<VMObject> AnonymousVMObject::clone()
     // so that the parent is still guaranteed to be able to have all
     // non-volatile memory available.
     size_t need_cow_pages = 0;
-    {
-        // We definitely need to commit non-volatile areas
-        for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
-            need_cow_pages += nonvolatile_range.count;
-            return IterationDecision::Continue;
-        });
-    }
+
+    // We definitely need to commit non-volatile areas
+    for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
+        need_cow_pages += nonvolatile_range.count;
+    });
 
     dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, need_cow_pages);
 
@@ -220,7 +218,6 @@ int AnonymousVMObject::purge_impl()
                 }
             });
         }
-        return IterationDecision::Continue;
     });
     return purged_page_count;
 }
@@ -284,7 +281,6 @@ void AnonymousVMObject::update_volatile_cache()
     m_volatile_ranges_cache.clear();
     for_each_nonvolatile_range([&](const VolatilePageRange& range) {
         m_volatile_ranges_cache.add_unchecked(range);
-        return IterationDecision::Continue;
     });
 
     m_volatile_ranges_cache_dirty = false;
diff --git a/Kernel/VM/AnonymousVMObject.h b/Kernel/VM/AnonymousVMObject.h
index bdca7c0336..85afc0d53a 100644
--- a/Kernel/VM/AnonymousVMObject.h
+++ b/Kernel/VM/AnonymousVMObject.h
@@ -40,7 +40,7 @@ public:
 
     bool is_any_volatile() const;
 
-    template<typename F>
+    template<IteratorFunction<const VolatilePageRange&> F>
     IterationDecision for_each_volatile_range(F f) const
     {
         VERIFY(m_lock.is_locked());
@@ -78,24 +78,42 @@ public:
         return IterationDecision::Continue;
     }
 
-    template<typename F>
+    template<IteratorFunction<const VolatilePageRange&> F>
     IterationDecision for_each_nonvolatile_range(F f) const
     {
         size_t base = 0;
         for_each_volatile_range([&](const VolatilePageRange& volatile_range) {
             if (volatile_range.base == base)
                 return IterationDecision::Continue;
-            IterationDecision decision = f({ base, volatile_range.base - base });
+            IterationDecision decision = f(VolatilePageRange { base, volatile_range.base - base });
             if (decision != IterationDecision::Continue)
                 return decision;
             base = volatile_range.base + volatile_range.count;
             return IterationDecision::Continue;
         });
         if (base < page_count())
-            return f({ base, page_count() - base });
+            return f(VolatilePageRange { base, page_count() - base });
         return IterationDecision::Continue;
     }
 
+    template<VoidFunction<const VolatilePageRange&> F>
+    IterationDecision for_each_volatile_range(F f) const
+    {
+        return for_each_volatile_range([&](auto& range) {
+            f(range);
+            return IterationDecision::Continue;
+        });
+    }
+
+    template<VoidFunction<const VolatilePageRange&> F>
+    IterationDecision for_each_nonvolatile_range(F f) const
+    {
+        return for_each_nonvolatile_range([&](auto range) {
+            f(move(range));
+            return IterationDecision::Continue;
+        });
+    }
+
 private:
     explicit AnonymousVMObject(size_t, AllocationStrategy);
     explicit AnonymousVMObject(PhysicalAddress, size_t);
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index af3db1243a..cb6b860b17 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -6,6 +6,7 @@
 
 #pragma once
 
+#include <AK/Concepts.h>
 #include <AK/HashTable.h>
 #include <AK/NonnullRefPtrVector.h>
 #include <AK/String.h>
@@ -157,7 +158,7 @@ public:
     unsigned super_physical_pages() const { return m_super_physical_pages; }
     unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
 
-    template<typename Callback>
+    template<IteratorFunction<VMObject&> Callback>
     static void for_each_vmobject(Callback callback)
     {
         for (auto& vmobject : MM.m_vmobjects) {
@@ -166,6 +167,13 @@
         }
     }
 
+    template<VoidFunction<VMObject&> Callback>
+    static void for_each_vmobject(Callback callback)
+    {
+        for (auto& vmobject : MM.m_vmobjects)
+            callback(vmobject);
+    }
+
     static Region* find_region_from_vaddr(Space&, VirtualAddress);
     static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
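
For a caller's-eye view of the change, here is the `clone()` call site from the first hunk above, abbreviated to show just the before and after of the `for_each_nonvolatile_range` callback:

```cpp
// Before: the callback had to return IterationDecision::Continue explicitly.
for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
    need_cow_pages += nonvolatile_range.count;
    return IterationDecision::Continue;
});

// After: a void-returning lambda picks the new VoidFunction overload,
// which supplies the Continue on the caller's behalf.
for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
    need_cow_pages += nonvolatile_range.count;
});
```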