path: root/Kernel/VM/VMObject.h
author     Andreas Kling <kling@serenityos.org>  2021-07-25 17:11:50 +0200
committer  Andreas Kling <kling@serenityos.org>  2021-07-25 17:28:06 +0200
commit     0d963fd64156944a78c6601d112d6b4c73ae4f90 (patch)
tree       1cd0b96612e91235c2ed489b82724550c3979790 /Kernel/VM/VMObject.h
parent     ae3778c3032892f031f8104f99dfad6dd39d448c (diff)
Kernel: Remove unnecessary counting of VMObject-attached Regions
VMObject already has an IntrusiveList of all the Regions that map it. We were keeping a counter in addition to this, and only using it in a single place to avoid iterating over the list in case it only had 1 entry.

Simplify VMObject by removing this counter and always iterating the list even if there's only 1 entry. :^)
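The idea can be sketched in ordinary C++ (a minimal illustration only, not SerenityOS code: it uses std::list rather than the kernel's AK::IntrusiveList, ignores the SpinLock, and the Region type and the "seen" counter are stand-ins). Instead of keeping m_regions_count in sync with the list, a caller that wants to know whether a VMObject is mapped by more than one Region can simply walk m_regions and stop as soon as it sees a second entry:

// Illustrative sketch: answer "shared by multiple regions?" by
// early-exiting iteration instead of a separately maintained counter.
#include <cstddef>
#include <list>

struct Region { };

class VMObject {
public:
    void add_region(Region& region) { m_regions.push_back(&region); }
    void remove_region(Region& region) { m_regions.remove(&region); }

    // Equivalent of the removed (m_regions_count > 1) check: walk the
    // list and bail out once a second entry has been seen.
    bool is_shared_by_multiple_regions() const
    {
        std::size_t seen = 0;
        for (auto* region : m_regions) {
            (void)region;
            if (++seen > 1)
                return true; // a second mapping exists
        }
        return false;
    }

private:
    std::list<Region*> m_regions; // stand-in for the kernel's IntrusiveList of Regions
};

In the kernel the walk would presumably happen under the same m_lock that add_region()/remove_region() take, and it touches at most two list nodes, which is why dropping the counter costs essentially nothing even when the list has only one entry.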
Diffstat (limited to 'Kernel/VM/VMObject.h')
-rw-r--r--  Kernel/VM/VMObject.h  5
1 files changed, 0 insertions, 5 deletions
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
index 7d2960328b..45a5976af8 100644
--- a/Kernel/VM/VMObject.h
+++ b/Kernel/VM/VMObject.h
@@ -52,19 +52,15 @@ public:
ALWAYS_INLINE void add_region(Region& region)
{
ScopedSpinLock locker(m_lock);
- m_regions_count++;
m_regions.append(region);
}
ALWAYS_INLINE void remove_region(Region& region)
{
ScopedSpinLock locker(m_lock);
- m_regions_count--;
m_regions.remove(region);
}
- ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }
-
void register_on_deleted_handler(VMObjectDeletedHandler& handler)
{
ScopedSpinLock locker(m_on_deleted_lock);
@@ -93,7 +89,6 @@ private:
VMObject& operator=(VMObject&&) = delete;
VMObject(VMObject&&) = delete;
- Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
HashTable<VMObjectDeletedHandler*> m_on_deleted;
SpinLock<u8> m_on_deleted_lock;