path: root/Kernel/VM/MemoryManager.cpp
author    Andreas Kling <kling@serenityos.org>    2021-07-12 22:52:17 +0200
committer Andreas Kling <kling@serenityos.org>    2021-07-13 22:40:25 +0200
commit    ba87571366ddae18d6d7659cf94cd44fe1e49519 (patch)
tree      7b7206eb38854ace55fbffb742bbcc71c70b98de /Kernel/VM/MemoryManager.cpp
parent    be83b3aff464e175bbe6800a4458a89e3f26e6b8 (diff)
Kernel: Implement zone-based buddy allocator for physical memory
The previous allocator was very naive and kept the state of all pages in one big bitmap. When allocating, we had to scan through the bitmap until we found an unset bit.

This patch introduces a new binary buddy allocator that manages the physical memory pages.

Each PhysicalRegion is divided into zones (PhysicalZone) of 16MB each. Any extra pages at the end of physical RAM that don't fit into a 16MB zone are turned into 15 or fewer 1MB zones.

Each zone starts out with one full-sized block, which is then recursively subdivided into halves upon allocation, until a block of the requested size can be returned.

There are more opportunities for improvement here: the way zone objects are allocated and stored is non-optimal. Same goes for the allocation of buddy block state bitmaps.
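For illustration, the subdivision-and-merge behaviour described above can be sketched outside the kernel. The following is a minimal standalone C++ sketch, assuming a single power-of-two-sized zone and per-order free lists; the class name BuddyZone and all helpers are hypothetical and are not the actual PhysicalZone API, and the real kernel tracks buddy block state in bitmaps rather than heap-allocated vectors.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

class BuddyZone {
public:
    // page_count must be a power of two, e.g. 4096 pages of 4 KiB == one 16 MiB zone.
    explicit BuddyZone(size_t page_count)
    {
        size_t order = 0;
        while ((size_t(1) << order) < page_count)
            ++order;
        m_free_lists.resize(order + 1);
        m_free_lists[order].push_back(0); // The zone starts as one full-sized free block.
    }

    // Allocate 2^order contiguous pages; returns the first page index, or nothing on failure.
    std::optional<size_t> allocate(size_t order)
    {
        // Find the smallest free block that is large enough.
        size_t current = order;
        while (current < m_free_lists.size() && m_free_lists[current].empty())
            ++current;
        if (current >= m_free_lists.size())
            return std::nullopt;

        size_t block = m_free_lists[current].back();
        m_free_lists[current].pop_back();

        // Recursively split the block into halves until it matches the requested
        // order, putting each upper half (the "buddy") on the smaller free list.
        while (current > order) {
            --current;
            size_t buddy = block + (size_t(1) << current);
            m_free_lists[current].push_back(buddy);
        }
        return block;
    }

    // Free a block previously returned by allocate(), merging with its buddy
    // whenever the buddy is also free, so large blocks re-form over time.
    void free(size_t block, size_t order)
    {
        while (order + 1 < m_free_lists.size()) {
            size_t buddy = block ^ (size_t(1) << order);
            auto& list = m_free_lists[order];
            auto it = std::find(list.begin(), list.end(), buddy);
            if (it == list.end())
                break;
            list.erase(it);
            block = std::min(block, buddy);
            ++order;
        }
        m_free_lists[order].push_back(block);
    }

private:
    // m_free_lists[k] holds the page offsets of free 2^k-page blocks.
    std::vector<std::vector<size_t>> m_free_lists;
};

int main()
{
    BuddyZone zone(4096);       // 4096 * 4 KiB = one 16 MiB zone (maximum order 12).
    auto a = zone.allocate(0);  // One page: splits 4096 -> 2048 -> ... -> 1.
    auto b = zone.allocate(4);  // Sixteen contiguous pages from a leftover buddy.
    printf("a=%zu b=%zu\n", *a, *b);
    zone.free(*a, 0);           // Buddies merge back together as blocks are freed.
    zone.free(*b, 4);
}

In this sketch a freed block merges with its buddy at block ^ (1 << order), so after both frees the zone is again a single full-sized block; the commit's closing remark about "buddy block state bitmaps" refers to the kernel storing this per-block state far more compactly than the vectors used here.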
Diffstat (limited to 'Kernel/VM/MemoryManager.cpp')
-rw-r--r--  Kernel/VM/MemoryManager.cpp  31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index be576ae43a..e2cc4e019b 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -257,7 +257,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Assign page to user physical physical_region.
if (!physical_region || physical_region->upper().offset(PAGE_SIZE) != addr) {
- m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
+ m_user_physical_regions.append(PhysicalRegion::try_create(addr, addr).release_nonnull());
physical_region = &m_user_physical_regions.last();
} else {
physical_region->expand(physical_region->lower(), addr);
@@ -266,9 +266,10 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
}
// Append statically-allocated super physical physical_region.
- m_super_physical_regions.append(PhysicalRegion::create(
+ m_super_physical_regions.append(PhysicalRegion::try_create(
PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages))),
- PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages))))));
+ PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages)))))
+ .release_nonnull());
for (auto& region : m_super_physical_regions)
m_system_memory_info.super_physical_pages += region.finalize_capacity();
@@ -293,11 +294,15 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
dmesgln("MM: {} range @ {} - {} (size 0x{:x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
}
- for (auto& region : m_super_physical_regions)
+ for (auto& region : m_super_physical_regions) {
dmesgln("MM: Super physical region: {} - {} (size 0x{:x})", region.lower(), region.upper().offset(-1), PAGE_SIZE * region.size());
+ region.initialize_zones();
+ }
- for (auto& region : m_user_physical_regions)
+ for (auto& region : m_user_physical_regions) {
dmesgln("MM: User physical region: {} - {} (size 0x{:x})", region.lower(), region.upper().offset(-1), PAGE_SIZE * region.size());
+ region.initialize_zones();
+ }
}
extern "C" PageDirectoryEntry boot_pd3[1024];
@@ -337,9 +342,12 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
PhysicalRegion* found_region { nullptr };
- for (auto& region : m_user_physical_regions) {
+ Optional<size_t> found_region_index;
+ for (size_t i = 0; i < m_user_physical_regions.size(); ++i) {
+ auto& region = m_user_physical_regions[i];
if (region.size() >= physical_page_array_pages_and_page_tables_count) {
found_region = &region;
+ found_region_index = i;
break;
}
}
@@ -354,12 +362,9 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
// We're stealing the entire region
- m_physical_pages_region = move(*found_region);
- m_user_physical_regions.remove_first_matching([&](auto& region) {
- return &region == found_region;
- });
+ m_physical_pages_region = m_user_physical_regions.take(*found_region_index);
} else {
- m_physical_pages_region = found_region->take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
+ m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
}
m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
@@ -445,7 +450,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
auto& physical_page_entry = m_physical_page_entries[physical_page_index];
- auto physical_page = adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(false));
+ auto physical_page = adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(false));
auto result = kernel_page_tables.set(virtual_page_array_current_page & ~0x1fffff, move(physical_page));
VERIFY(result == AK::HashSetResult::InsertedNewEntry);
@@ -465,7 +470,7 @@ PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physic
PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical_page)
{
- PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, physical_page));
+ PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, allocated.physical_page));
VERIFY(m_physical_page_entries);
size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
VERIFY(physical_page_entry_index < m_physical_page_entries_count);