author    Andreas Kling <kling@serenityos.org>    2022-04-02 01:28:01 +0200
committer Andreas Kling <kling@serenityos.org>    2022-04-03 21:51:58 +0200
commit    2617adac526fd2ac897f81d26d9203b179ec7abe (patch)
tree      2814b5a5a62a97109874998e3b06c4d6041c7fd4 /Kernel/Memory
parent    738147483517a572625cc7089ad5b955faaa9f69 (diff)
Kernel: Store AddressSpace memory regions in an IntrusiveRedBlackTree
This means we never need to allocate when inserting/removing regions from the address space.
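To make the tradeoff concrete: with the old RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>, every insert heap-allocated a tree node, while an intrusive tree embeds the node in the Region itself, so linking and unlinking only rewire pointers. Below is a rough standalone sketch of that idea. It is not AK's IntrusiveRedBlackTree (no balancing, and names like TreeNode and region_from_node are illustrative), just the minimum needed to show why an intrusive insert cannot fail or allocate.

// Rough sketch only: an unbalanced intrusive tree with illustrative names,
// not AK's IntrusiveRedBlackTree. The point is that the node lives inside
// the element, so insert/lookup never touch the heap.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct TreeNode {
    TreeNode* left { nullptr };
    TreeNode* right { nullptr };
    uintptr_t key { 0 };
};

struct Region {
    uintptr_t base { 0 };
    size_t size { 0 };
    TreeNode tree_node; // embedded node: the tree borrows this storage
};

struct IntrusiveTree {
    TreeNode* root { nullptr };

    // Linking only rewires pointers; there is nothing here that can fail.
    void insert(uintptr_t key, TreeNode& node)
    {
        node.key = key;
        TreeNode** link = &root;
        while (*link)
            link = key < (*link)->key ? &(*link)->left : &(*link)->right;
        *link = &node;
    }

    TreeNode* find(uintptr_t key) const
    {
        TreeNode* node = root;
        while (node && node->key != key)
            node = key < node->key ? node->left : node->right;
        return node;
    }
};

// Recover the containing Region from its embedded node, the same trick the
// member-pointer template parameter (&Region::m_tree_node) enables in AK.
static Region& region_from_node(TreeNode& node)
{
    return *reinterpret_cast<Region*>(
        reinterpret_cast<char*>(&node) - offsetof(Region, tree_node));
}

int main()
{
    Region a { 0x1000, 0x2000 };
    Region b { 0x8000, 0x1000 };

    IntrusiveTree tree;
    tree.insert(a.base, a.tree_node); // no allocation
    tree.insert(b.base, b.tree_node);

    if (auto* node = tree.find(0x8000)) {
        auto& region = region_from_node(*node);
        printf("found region at %#zx, size %zu\n", (size_t)region.base, region.size);
    }
    return 0;
}

The real tree is keyed the same way as the sketch: the commit inserts with m_regions.insert(ptr->vaddr().get(), *ptr) and looks regions up by base address with find_largest_not_above().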
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--  Kernel/Memory/AddressSpace.cpp  74
-rw-r--r--  Kernel/Memory/AddressSpace.h     9
-rw-r--r--  Kernel/Memory/Region.h           1
3 files changed, 51 insertions, 33 deletions
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index e8cb21caac..cccc3163c4 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -30,7 +30,20 @@ AddressSpace::AddressSpace(NonnullRefPtr<PageDirectory> page_directory)
{
}
-AddressSpace::~AddressSpace() = default;
+AddressSpace::~AddressSpace()
+{
+ delete_all_regions_assuming_they_are_unmapped();
+}
+
+void AddressSpace::delete_all_regions_assuming_they_are_unmapped()
+{
+ // FIXME: This could definitely be done in a more efficient manner.
+ while (!m_regions.is_empty()) {
+ auto& region = *m_regions.begin();
+ m_regions.remove(region.vaddr().get());
+ delete &region;
+ }
+}
ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
{
@@ -208,9 +221,9 @@ void AddressSpace::deallocate_region(Region& region)
NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
{
SpinlockLocker lock(m_lock);
- auto found_region = m_regions.unsafe_remove(region.vaddr().get());
- VERIFY(found_region.ptr() == &region);
- return found_region;
+ auto did_remove = m_regions.remove(region.vaddr().get());
+ VERIFY(did_remove);
+ return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
}
Region* AddressSpace::find_region_from_range(VirtualRange const& range)
@@ -221,9 +234,9 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
return nullptr;
auto& region = *found_region;
auto rounded_range_size = page_round_up(range.size());
- if (rounded_range_size.is_error() || region->size() != rounded_range_size.value())
+ if (rounded_range_size.is_error() || region.size() != rounded_range_size.value())
return nullptr;
- return region;
+ return &region;
}
Region* AddressSpace::find_region_containing(VirtualRange const& range)
@@ -232,7 +245,7 @@ Region* AddressSpace::find_region_containing(VirtualRange const& range)
auto* candidate = m_regions.find_largest_not_above(range.base().get());
if (!candidate)
return nullptr;
- return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
+ return (*candidate).range().contains(range) ? candidate : nullptr;
}
ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
@@ -245,12 +258,12 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
auto* found_region = m_regions.find_largest_not_above(range.base().get());
if (!found_region)
return regions;
- for (auto iter = m_regions.begin_from((*found_region)->vaddr().get()); !iter.is_end(); ++iter) {
- auto const& iter_range = (*iter)->range();
+ for (auto iter = m_regions.begin_from((*found_region).vaddr().get()); !iter.is_end(); ++iter) {
+ auto const& iter_range = (*iter).range();
if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
- TRY(regions.try_append(*iter));
- total_size_collected += (*iter)->size() - iter_range.intersect(range).size();
+ TRY(regions.try_append(&*iter));
+ total_size_collected += (*iter).size() - iter_range.intersect(range).size();
if (total_size_collected == range.size())
break;
}
@@ -261,9 +274,10 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
ErrorOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
{
- auto* ptr = region.ptr();
SpinlockLocker lock(m_lock);
- TRY(m_regions.try_insert(region->vaddr().get(), move(region)));
+ // NOTE: We leak the region into the IRBT here. It must be deleted or readopted when removed from the tree.
+ auto* ptr = region.leak_ptr();
+ m_regions.insert(ptr->vaddr().get(), *ptr);
return ptr;
}
@@ -300,8 +314,7 @@ void AddressSpace::dump_regions()
SpinlockLocker lock(m_lock);
- for (auto const& sorted_region : m_regions) {
- auto const& region = *sorted_region;
+ for (auto const& region : m_regions) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
region.is_readable() ? 'R' : ' ',
region.is_writable() ? 'W' : ' ',
@@ -322,9 +335,10 @@ void AddressSpace::remove_all_regions(Badge<Process>)
SpinlockLocker pd_locker(m_page_directory->get_lock());
SpinlockLocker mm_locker(s_mm_lock);
for (auto& region : m_regions)
- (*region).unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
+ region.unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
}
- m_regions.clear();
+
+ delete_all_regions_assuming_they_are_unmapped();
}
size_t AddressSpace::amount_dirty_private() const
@@ -335,8 +349,8 @@ size_t AddressSpace::amount_dirty_private() const
// That's probably a situation that needs to be looked at in general.
size_t amount = 0;
for (auto const& region : m_regions) {
- if (!region->is_shared())
- amount += region->amount_dirty();
+ if (!region.is_shared())
+ amount += region.amount_dirty();
}
return amount;
}
@@ -346,8 +360,8 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
SpinlockLocker lock(m_lock);
HashTable<InodeVMObject const*> vmobjects;
for (auto const& region : m_regions) {
- if (region->vmobject().is_inode())
- TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region->vmobject())));
+ if (region.vmobject().is_inode())
+ TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
}
size_t amount = 0;
for (auto& vmobject : vmobjects)
@@ -360,7 +374,7 @@ size_t AddressSpace::amount_virtual() const
SpinlockLocker lock(m_lock);
size_t amount = 0;
for (auto const& region : m_regions) {
- amount += region->size();
+ amount += region.size();
}
return amount;
}
@@ -371,7 +385,7 @@ size_t AddressSpace::amount_resident() const
// FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0;
for (auto const& region : m_regions) {
- amount += region->amount_resident();
+ amount += region.amount_resident();
}
return amount;
}
@@ -385,7 +399,7 @@ size_t AddressSpace::amount_shared() const
// so that every Region contributes +1 ref to each of its PhysicalPages.
size_t amount = 0;
for (auto const& region : m_regions) {
- amount += region->amount_shared();
+ amount += region.amount_shared();
}
return amount;
}
@@ -395,11 +409,11 @@ size_t AddressSpace::amount_purgeable_volatile() const
SpinlockLocker lock(m_lock);
size_t amount = 0;
for (auto const& region : m_regions) {
- if (!region->vmobject().is_anonymous())
+ if (!region.vmobject().is_anonymous())
continue;
- auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
+ auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
if (vmobject.is_purgeable() && vmobject.is_volatile())
- amount += region->amount_resident();
+ amount += region.amount_resident();
}
return amount;
}
@@ -409,11 +423,11 @@ size_t AddressSpace::amount_purgeable_nonvolatile() const
SpinlockLocker lock(m_lock);
size_t amount = 0;
for (auto const& region : m_regions) {
- if (!region->vmobject().is_anonymous())
+ if (!region.vmobject().is_anonymous())
continue;
- auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
+ auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
if (vmobject.is_purgeable() && !vmobject.is_volatile())
- amount += region->amount_resident();
+ amount += region.amount_resident();
}
return amount;
}
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index 0c08647749..a4c04a3d17 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -12,6 +12,7 @@
#include <AK/WeakPtr.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PageDirectory.h>
+#include <Kernel/Memory/Region.h>
#include <Kernel/UnixTypes.h>
namespace Kernel::Memory {
@@ -28,8 +29,8 @@ public:
size_t region_count() const { return m_regions.size(); }
- RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() { return m_regions; }
- RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> const& regions() const { return m_regions; }
+ auto& regions() { return m_regions; }
+ auto const& regions() const { return m_regions; }
void dump_regions();
@@ -68,11 +69,13 @@ public:
private:
explicit AddressSpace(NonnullRefPtr<PageDirectory>);
+ void delete_all_regions_assuming_they_are_unmapped();
+
mutable RecursiveSpinlock m_lock;
RefPtr<PageDirectory> m_page_directory;
- RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> m_regions;
+ IntrusiveRedBlackTree<&Region::m_tree_node> m_regions;
bool m_enforces_syscall_regions { false };
};
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index dcaeae18a0..29c264c480 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -31,6 +31,7 @@ enum class ShouldFlushTLB {
class Region final
: public Weakable<Region> {
+ friend class AddressSpace;
friend class MemoryManager;
public:
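A consequence of the switch visible in the diff above: the intrusive tree does not own its Regions, so add_region() leaks ownership into the tree (region.leak_ptr()) and take_region() readopts the raw pointer after removing it (the NonnullOwnPtr Adopt constructor). A rough standalone analogy, using std::unique_ptr and a plain std::map as stand-ins for NonnullOwnPtr and the intrusive tree (names here are illustrative, not kernel API):

// Illustrative analogy only: ownership is released on insert and readopted
// on removal, mirroring leak_ptr() and the Adopt constructor in the commit.
#include <cstdint>
#include <map>
#include <memory>

struct Region {
    uintptr_t base { 0 };
};

struct RegionTree {
    std::map<uintptr_t, Region*> by_base; // stores raw pointers, owns nothing
};

// Analog of add_region(): the smart pointer gives up ownership; the raw
// pointer must later be deleted or readopted when it leaves the tree.
Region* add_region(RegionTree& tree, std::unique_ptr<Region> region)
{
    Region* ptr = region.release(); // like NonnullOwnPtr::leak_ptr()
    tree.by_base.emplace(ptr->base, ptr);
    return ptr;
}

// Analog of take_region(): unlink from the tree, then wrap the raw pointer
// in an owning handle again (like NonnullOwnPtr's Adopt tag).
std::unique_ptr<Region> take_region(RegionTree& tree, Region& region)
{
    bool did_remove = tree.by_base.erase(region.base) > 0;
    if (!did_remove)
        return nullptr; // the kernel VERIFYs instead of failing softly
    return std::unique_ptr<Region>(&region);
}

int main()
{
    RegionTree tree;
    auto* raw = add_region(tree, std::make_unique<Region>(Region { 0x1000 }));
    auto owned = take_region(tree, *raw); // ownership returns to the caller
    return 0; // 'owned' frees the Region when it goes out of scope
}

delete_all_regions_assuming_they_are_unmapped() is the other side of the same contract: since the intrusive tree never deletes anything on its own, the AddressSpace destructor walks the tree and deletes each leaked Region explicitly.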