summary refs log tree commit diff
path: root/Kernel/VM/MemoryManager.cpp
diff options
context:
space:
mode:
author Andreas Kling <kling@serenityos.org> 2021-07-11 14:29:02 +0200
committer Andreas Kling <kling@serenityos.org> 2021-07-11 15:15:57 +0200
commitb2cd9b2c882431073c98c9a42ebac5895f339208 (patch)
tree57588f3901e2e68729864aeaca0baef72f76c7fd /Kernel/VM/MemoryManager.cpp
parent29d53cbee2cb153a50d63e12311c70209d8e7c05 (diff)
downloadserenity-b2cd9b2c882431073c98c9a42ebac5895f339208.zip
Kernel: Remove pointless ref-counting from PhysicalRegion
These are not multiple-owner objects and have no use for ref-counting. Make them simple value types instead (not eternal heap-allocated).
Diffstat (limited to 'Kernel/VM/MemoryManager.cpp')
-rw-r--r-- Kernel/VM/MemoryManager.cpp | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index fb3b0a5451..01845f5138 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -174,7 +174,7 @@ bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_addres
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
{
- RefPtr<PhysicalRegion> physical_region;
+ PhysicalRegion* physical_region { nullptr };
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
@@ -256,9 +256,9 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
continue;
// Assign page to user physical physical_region.
- if (physical_region.is_null() || physical_region->upper().offset(PAGE_SIZE) != addr) {
+ if (!physical_region || physical_region->upper().offset(PAGE_SIZE) != addr) {
m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
- physical_region = m_user_physical_regions.last();
+ physical_region = &m_user_physical_regions.last();
} else {
physical_region->expand(physical_region->lower(), addr);
}
@@ -336,10 +336,10 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
auto physical_page_array_pages_and_page_tables_count = physical_page_array_pages + needed_page_table_count;
// Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
- RefPtr<PhysicalRegion> found_region;
+ PhysicalRegion* found_region { nullptr };
for (auto& region : m_user_physical_regions) {
if (region.size() >= physical_page_array_pages_and_page_tables_count) {
- found_region = region;
+ found_region = &region;
break;
}
}
@@ -354,10 +354,10 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
// We're stealing the entire region
+ m_physical_pages_region = move(*found_region);
m_user_physical_regions.remove_first_matching([&](auto& region) {
- return region == found_region.ptr();
+ return &region == found_region;
});
- m_physical_pages_region = found_region.release_nonnull();
} else {
m_physical_pages_region = found_region->take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
}