author     Andreas Kling <kling@serenityos.org>    2022-08-23 12:28:04 +0200
committer  Andreas Kling <kling@serenityos.org>    2022-08-24 14:57:51 +0200
commit     dc9d2c1b10999a177f27b0f0f8c3ee6df3a61dad
tree       6b7d5a02a206c1f9e4d40d09c641110fa20b5db3 /Kernel/Memory
parent     352d6545a99955771c14736fda99d0ba6b124b60
Kernel: Wrap RegionTree objects in SpinlockProtected
This makes locking them much more straightforward, and lets us remove a bunch of confusing uses of AddressSpace::m_lock. That lock will also be converted to SpinlockProtected in a subsequent patch.
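
For readers unfamiliar with the pattern, the sketch below illustrates the access style this commit moves to. It is a minimal illustration, not code from the commit: example_place() is a hypothetical function, while SpinlockProtected<RegionTree>, with(), place_specifically(), ErrorOr<> and TRY() are taken from the diff itself. with() acquires the spinlock, passes the protected RegionTree to the lambda, and releases the lock when the lambda returns; giving the lambda an ErrorOr<void> return type lets TRY() propagate failures out of the locked scope.

// Illustrative sketch only; example_place() is hypothetical and not part of this commit.
// SpinlockProtected<T> bundles the data with its lock. The member is constructed with a
// LockRank plus the forwarded RegionTree constructor arguments, as in the diff:
//     m_region_tree(LockRank::None, total_range)
SpinlockProtected<RegionTree> m_region_tree;

ErrorOr<void> example_place(Region& region, VirtualRange const& range)
{
    // with() holds the spinlock for the duration of the lambda. Returning
    // ErrorOr<void> from the lambda allows TRY() to bail out of the locked
    // scope on error, matching the pattern used throughout this diff.
    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
        TRY(region_tree.place_specifically(region, range));
        return {};
    });
}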
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--   Kernel/Memory/AddressSpace.cpp   | 230
-rw-r--r--   Kernel/Memory/AddressSpace.h     |  13
-rw-r--r--   Kernel/Memory/MemoryManager.cpp  |  49
-rw-r--r--   Kernel/Memory/MemoryManager.h    |   2
-rw-r--r--   Kernel/Memory/RegionTree.cpp     |   5
5 files changed, 152 insertions, 147 deletions
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index cc2a765a40..f215e572cd 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -25,7 +25,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
VirtualRange total_range = [&]() -> VirtualRange {
if (parent)
- return parent->m_region_tree.total_range();
+ return parent->m_total_range;
constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
size_t random_offset = (get_fast_random<u8>() % 2 * MiB) & PAGE_MASK;
@@ -40,7 +40,8 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
: m_page_directory(move(page_directory))
- , m_region_tree(total_range)
+ , m_total_range(total_range)
+ , m_region_tree(LockRank::None, total_range)
{
}
@@ -148,8 +149,10 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
if (source_region.should_cow(page_offset_in_source_region + i))
TRY(new_region->set_should_cow(i, true));
}
- SpinlockLocker locker(m_lock);
- TRY(m_region_tree.place_specifically(*new_region, range));
+ TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
+ TRY(region_tree.place_specifically(*new_region, range));
+ return {};
+ }));
return new_region.leak_ptr();
}
@@ -164,11 +167,14 @@ ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize
region_name = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
- if (requested_address.is_null()) {
- TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
- } else {
- TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
- }
+ TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
+ if (requested_address.is_null()) {
+ TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+ } else {
+ TRY(region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
+ }
+ return {};
+ }));
TRY(region->map(page_directory(), ShouldFlushTLB::No));
return region.leak_ptr();
}
@@ -204,29 +210,29 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAdd
auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
- SpinlockLocker locker(m_lock);
-
- if (requested_address.is_null())
- TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
- else
- TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
-
- ArmedScopeGuard remove_region_from_tree_on_failure = [this, &region]() {
- // At this point the region is already part of the Process region tree, so we have to make sure
- // we remove it from the tree before returning an error, or else the Region tree will contain
- // a dangling pointer to the free'd Region instance
- m_region_tree.remove(*region);
- };
-
- if (prot == PROT_NONE) {
- // For PROT_NONE mappings, we don't have to set up any page table mappings.
- // We do still need to attach the region to the page_directory though.
- region->set_page_directory(page_directory());
- } else {
- TRY(region->map(page_directory(), ShouldFlushTLB::No));
- }
- remove_region_from_tree_on_failure.disarm();
- return region.leak_ptr();
+ return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Region*> {
+ if (requested_address.is_null())
+ TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+ else
+ TRY(region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
+
+ ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
+ // At this point the region is already part of the Process region tree, so we have to make sure
+ // we remove it from the tree before returning an error, or else the Region tree will contain
+ // a dangling pointer to the free'd Region instance
+ region_tree.remove(*region);
+ };
+
+ if (prot == PROT_NONE) {
+ // For PROT_NONE mappings, we don't have to set up any page table mappings.
+ // We do still need to attach the region to the page_directory though.
+ region->set_page_directory(page_directory());
+ } else {
+ TRY(region->map(page_directory(), ShouldFlushTLB::No));
+ }
+ remove_region_from_tree_on_failure.disarm();
+ return region.leak_ptr();
+ });
}
void AddressSpace::deallocate_region(Region& region)
@@ -236,16 +242,14 @@ void AddressSpace::deallocate_region(Region& region)
NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
{
- auto did_remove = m_region_tree.remove(region);
+ auto did_remove = m_region_tree.with([&](auto& region_tree) { return region_tree.remove(region); });
VERIFY(did_remove);
return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
}
Region* AddressSpace::find_region_from_range(VirtualRange const& range)
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
- auto* found_region = m_region_tree.regions().find(range.base().get());
+ auto* found_region = m_region_tree.with([&](auto& region_tree) { return region_tree.regions().find(range.base().get()); });
if (!found_region)
return nullptr;
auto& region = *found_region;
@@ -257,7 +261,9 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
Region* AddressSpace::find_region_containing(VirtualRange const& range)
{
- return m_region_tree.find_region_containing(range);
+ return m_region_tree.with([&](auto& region_tree) {
+ return region_tree.find_region_containing(range);
+ });
}
ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
@@ -265,24 +271,23 @@ ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange
Vector<Region*, 4> regions = {};
size_t total_size_collected = 0;
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
-
- auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
- if (!found_region)
- return regions;
- for (auto iter = m_region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
- auto const& iter_range = (*iter).range();
- if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
- TRY(regions.try_append(&*iter));
-
- total_size_collected += (*iter).size() - iter_range.intersect(range).size();
- if (total_size_collected == range.size())
- break;
+ return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Vector<Region*, 4>> {
+ auto* found_region = region_tree.regions().find_largest_not_above(range.base().get());
+ if (!found_region)
+ return regions;
+ for (auto iter = region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
+ auto const& iter_range = (*iter).range();
+ if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
+ TRY(regions.try_append(&*iter));
+
+ total_size_collected += (*iter).size() - iter_range.intersect(range).size();
+ if (total_size_collected == range.size())
+ break;
+ }
}
- }
- return regions;
+ return regions;
+ });
}
// Carve out a virtual address range from a region and return the two regions on either side
@@ -316,61 +321,63 @@ void AddressSpace::dump_regions()
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
-
- for (auto const& region : m_region_tree.regions()) {
- dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
- region.is_readable() ? 'R' : ' ',
- region.is_writable() ? 'W' : ' ',
- region.is_executable() ? 'X' : ' ',
- region.is_shared() ? 'S' : ' ',
- region.is_stack() ? 'T' : ' ',
- region.is_syscall_region() ? 'C' : ' ',
- region.name());
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
+ dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
+ region.is_readable() ? 'R' : ' ',
+ region.is_writable() ? 'W' : ' ',
+ region.is_executable() ? 'X' : ' ',
+ region.is_shared() ? 'S' : ' ',
+ region.is_stack() ? 'T' : ' ',
+ region.is_syscall_region() ? 'C' : ' ',
+ region.name());
+ }
+ });
MM.dump_kernel_regions();
}
void AddressSpace::remove_all_regions(Badge<Process>)
{
VERIFY(Thread::current() == g_finalizer);
- SpinlockLocker locker(m_lock);
{
SpinlockLocker pd_locker(m_page_directory->get_lock());
SpinlockLocker mm_locker(s_mm_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
- for (auto& region : m_region_tree.regions())
- region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto& region : region_tree.regions())
+ region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
+ });
}
- m_region_tree.delete_all_regions_assuming_they_are_unmapped();
+ m_region_tree.with([&](auto& region_tree) {
+ region_tree.delete_all_regions_assuming_they_are_unmapped();
+ });
}
size_t AddressSpace::amount_dirty_private() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
// The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
// That's probably a situation that needs to be looked at in general.
size_t amount = 0;
- for (auto const& region : m_region_tree.regions()) {
- if (!region.is_shared())
- amount += region.amount_dirty();
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
+ if (!region.is_shared())
+ amount += region.amount_dirty();
+ }
+ });
return amount;
}
ErrorOr<size_t> AddressSpace::amount_clean_inode() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
- HashTable<InodeVMObject const*> vmobjects;
- for (auto const& region : m_region_tree.regions()) {
- if (region.vmobject().is_inode())
- TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
- }
+ HashTable<InodeVMObject const*> vmobjects;
+ TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
+ for (auto const& region : region_tree.regions()) {
+ if (region.vmobject().is_inode())
+ TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
+ }
+ return {};
+ }));
size_t amount = 0;
for (auto& vmobject : vmobjects)
amount += vmobject->amount_clean();
@@ -379,69 +386,68 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
size_t AddressSpace::amount_virtual() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
- for (auto const& region : m_region_tree.regions()) {
- amount += region.size();
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
+ amount += region.size();
+ }
+ });
return amount;
}
size_t AddressSpace::amount_resident() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0;
- for (auto const& region : m_region_tree.regions()) {
- amount += region.amount_resident();
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
+ amount += region.amount_resident();
+ }
+ });
return amount;
}
size_t AddressSpace::amount_shared() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This will double count if multiple regions use the same physical page.
// FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
// and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
// so that every Region contributes +1 ref to each of its PhysicalPages.
size_t amount = 0;
- for (auto const& region : m_region_tree.regions()) {
- amount += region.amount_shared();
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
+ amount += region.amount_shared();
+ }
+ });
return amount;
}
size_t AddressSpace::amount_purgeable_volatile() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
- for (auto const& region : m_region_tree.regions()) {
- if (!region.vmobject().is_anonymous())
- continue;
- auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
- if (vmobject.is_purgeable() && vmobject.is_volatile())
- amount += region.amount_resident();
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
+ if (!region.vmobject().is_anonymous())
+ continue;
+ auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
+ if (vmobject.is_purgeable() && vmobject.is_volatile())
+ amount += region.amount_resident();
+ }
+ });
return amount;
}
size_t AddressSpace::amount_purgeable_nonvolatile() const
{
- SpinlockLocker lock(m_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
- for (auto const& region : m_region_tree.regions()) {
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto const& region : region_tree.regions()) {
if (!region.vmobject().is_anonymous())
continue;
auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
if (vmobject.is_purgeable() && !vmobject.is_volatile())
amount += region.amount_resident();
- }
+ } });
return amount;
}
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index e07cb3988a..23f7c7a76b 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -10,6 +10,7 @@
#include <AK/RedBlackTree.h>
#include <AK/Vector.h>
#include <Kernel/Library/LockWeakPtr.h>
+#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/Region.h>
@@ -26,8 +27,8 @@ public:
PageDirectory& page_directory() { return *m_page_directory; }
PageDirectory const& page_directory() const { return *m_page_directory; }
- auto& regions() { return m_region_tree.regions(); }
- auto const& regions() const { return m_region_tree.regions(); }
+ SpinlockProtected<RegionTree>& region_tree() { return m_region_tree; }
+ SpinlockProtected<RegionTree> const& region_tree() const { return m_region_tree; }
void dump_regions();
@@ -62,8 +63,6 @@ public:
size_t amount_purgeable_volatile() const;
size_t amount_purgeable_nonvolatile() const;
- auto& region_tree() { return m_region_tree; }
-
private:
AddressSpace(NonnullLockRefPtr<PageDirectory>, VirtualRange total_range);
@@ -71,7 +70,11 @@ private:
LockRefPtr<PageDirectory> m_page_directory;
- RegionTree m_region_tree;
+ // NOTE: The total range is also in the RegionTree, but since it never changes,
+ // it's nice to have it in a place where we can access it without locking.
+ VirtualRange m_total_range;
+
+ SpinlockProtected<RegionTree> m_region_tree;
bool m_enforces_syscall_regions { false };
};
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index a2aaf67cf5..01d73bc959 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -83,7 +83,7 @@ static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
}
UNMAP_AFTER_INIT MemoryManager::MemoryManager()
- : m_region_tree(kernel_virtual_range())
+ : m_region_tree(LockRank::None, kernel_virtual_range())
{
s_the = this;
@@ -434,13 +434,13 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
- MUST(m_region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }));
+ MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }); }));
}
// Allocate a virtual address range for our array
// This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
auto& region = *MUST(Region::create_unbacked()).leak_ptr();
- MUST(m_region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE));
+ MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE); }));
auto range = region.range();
// Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
@@ -643,7 +643,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress address)
if (is_user_address(address))
return nullptr;
- return MM.m_region_tree.find_region_containing(address);
+ return MM.m_region_tree.with([&](auto& region_tree) { return region_tree.find_region_containing(address); });
}
Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
@@ -747,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
name_kstring = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+ TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -790,7 +790,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
name_kstring = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+ TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -803,7 +803,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
if (!name.is_null())
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE));
+ TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -817,7 +817,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+ TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -1120,7 +1120,7 @@ bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vadd
void MemoryManager::unregister_kernel_region(Region& region)
{
VERIFY(region.is_kernel());
- m_region_tree.remove(region);
+ m_region_tree.with([&](auto& region_tree) { region_tree.remove(region); });
}
void MemoryManager::dump_kernel_regions()
@@ -1134,20 +1134,21 @@ void MemoryManager::dump_kernel_regions()
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
SpinlockLocker lock(s_mm_lock);
- SpinlockLocker tree_locker(m_region_tree.get_lock());
- for (auto const& region : m_region_tree.regions()) {
- dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
- region.vaddr().get(),
- region.vaddr().offset(region.size() - 1).get(),
- region.size(),
- region.is_readable() ? 'R' : ' ',
- region.is_writable() ? 'W' : ' ',
- region.is_executable() ? 'X' : ' ',
- region.is_shared() ? 'S' : ' ',
- region.is_stack() ? 'T' : ' ',
- region.is_syscall_region() ? 'C' : ' ',
- region.name());
- }
+ m_region_tree.with([&](auto& region_tree) {
+ for (auto& region : region_tree.regions()) {
+ dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
+ region.vaddr().get(),
+ region.vaddr().offset(region.size() - 1).get(),
+ region.size(),
+ region.is_readable() ? 'R' : ' ',
+ region.is_writable() ? 'W' : ' ',
+ region.is_executable() ? 'X' : ' ',
+ region.is_shared() ? 'S' : ' ',
+ region.is_stack() ? 'T' : ' ',
+ region.is_syscall_region() ? 'C' : ' ',
+ region.name());
+ }
+ });
}
void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
@@ -1201,7 +1202,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::create_identity_mapped_reg
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_unbacked_region_anywhere(size_t size, size_t alignment)
{
auto region = TRY(Region::create_unbacked());
- TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment));
+ TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment); }));
return region;
}
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 84eedc7570..6bf827773c 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -298,7 +298,7 @@ private:
PhysicalPageEntry* m_physical_page_entries { nullptr };
size_t m_physical_page_entries_count { 0 };
- RegionTree m_region_tree;
+ SpinlockProtected<RegionTree> m_region_tree;
Vector<UsedMemoryRange> m_used_memory_ranges;
Vector<PhysicalMemoryRange> m_physical_memory_ranges;
diff --git a/Kernel/Memory/RegionTree.cpp b/Kernel/Memory/RegionTree.cpp
index ac2d55093d..72c43b98e8 100644
--- a/Kernel/Memory/RegionTree.cpp
+++ b/Kernel/Memory/RegionTree.cpp
@@ -151,7 +151,6 @@ ErrorOr<VirtualRange> RegionTree::allocate_range_randomized(size_t size, size_t
ErrorOr<void> RegionTree::place_anywhere(Region& region, RandomizeVirtualAddress randomize_virtual_address, size_t size, size_t alignment)
{
- SpinlockLocker locker(m_lock);
auto range = TRY(randomize_virtual_address == RandomizeVirtualAddress::Yes ? allocate_range_randomized(size, alignment) : allocate_range_anywhere(size, alignment));
region.m_range = range;
m_regions.insert(region.vaddr().get(), region);
@@ -160,7 +159,6 @@ ErrorOr<void> RegionTree::place_anywhere(Region& region, RandomizeVirtualAddress
ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const& range)
{
- SpinlockLocker locker(m_lock);
auto allocated_range = TRY(allocate_range_specific(range.base(), range.size()));
region.m_range = allocated_range;
m_regions.insert(region.vaddr().get(), region);
@@ -169,13 +167,11 @@ ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const&
bool RegionTree::remove(Region& region)
{
- SpinlockLocker locker(m_lock);
return m_regions.remove(region.range().base().get());
}
Region* RegionTree::find_region_containing(VirtualAddress address)
{
- SpinlockLocker locker(m_lock);
auto* region = m_regions.find_largest_not_above(address.get());
if (!region || !region->contains(address))
return nullptr;
@@ -184,7 +180,6 @@ Region* RegionTree::find_region_containing(VirtualAddress address)
Region* RegionTree::find_region_containing(VirtualRange range)
{
- SpinlockLocker lock(m_lock);
auto* region = m_regions.find_largest_not_above(range.base().get());
if (!region || !region->contains(range))
return nullptr;