author    Jean-Baptiste Boric <jblbeurope@gmail.com>  2021-01-20 17:49:55 +0100
committer Andreas Kling <kling@serenityos.org>        2021-01-22 22:17:39 +0100
commit    3cbe8054864a3496dc5d097a1902b3406614113f (patch)
tree      27cd5a6bd135140be58e2baeec2de79092b963a1 /Kernel/VM/MemoryManager.cpp
parent    5cd1217b6e07a913d2f9c1938fbbe5e0a1bcc8f5 (diff)
Kernel: Move kmalloc heaps and super pages inside .bss segment
The kernel ignored the first 8 MiB of RAM while parsing the memory map because the kmalloc heaps and the super physical pages lived there. Move all of that inside the .bss segment so that those memory regions are accounted for; otherwise we risk overwriting boot modules placed next to the kernel.
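
For illustration, here is a minimal standalone sketch of the placement technique, not the patch itself: a fixed pool is declared with a dedicated linker section so it becomes part of the kernel image, and generic "skip the kernel image" logic then protects it automatically. The linker-script fragment, the aligned attribute, and the is_inside_kernel_image() helper are assumptions added for the sketch; super_pages and the image-bound symbols follow the patch.

    // Sketch: reserve a page-aligned pool inside the kernel image by
    // giving it a named section. Assumes a linker script that places
    // ".super_pages" between start_of_kernel_image and
    // end_of_kernel_image, e.g.:
    //
    //     .super_pages ALIGN(4K) : { *(.super_pages) }
    //
    #include <cstddef>
    #include <cstdint>

    static constexpr size_t MiB = 1024 * 1024;

    // The pool is now part of the kernel binary's footprint, so the
    // memory map parser no longer needs a hard-coded carve-out for it.
    __attribute__((section(".super_pages"), aligned(4096)))
    static uint8_t super_pages[1 * MiB];

    // Provided by the linker script in a real kernel; declared here to
    // show how the image bounds are typically consumed.
    extern "C" uint8_t start_of_kernel_image[];
    extern "C" uint8_t end_of_kernel_image[];

    // Hypothetical helper: any physical page inside the image bounds is
    // off-limits to the page allocator, including the pool above.
    static bool is_inside_kernel_image(uintptr_t addr)
    {
        return addr >= reinterpret_cast<uintptr_t>(start_of_kernel_image)
            && addr < reinterpret_cast<uintptr_t>(end_of_kernel_image);
    }

The payoff over the old scheme is that nothing about the pool's physical address is hard-coded: moving or resizing it only touches the declaration and the linker script.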
Diffstat (limited to 'Kernel/VM/MemoryManager.cpp')
-rw-r--r--  Kernel/VM/MemoryManager.cpp  58
1 file changed, 29 insertions, 29 deletions
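
The core of the parse_memory_map() change, shown in full in the diff below, is the shift from a hard-coded 7-8 MiB window to a generic "skip any used range" check. A simplified, self-contained sketch of that check follows; UsedMemoryRange and std::vector are hypothetical stand-ins for the patch's m_used_memory_ranges and SerenityOS's own Vector.

    #include <cstdint>
    #include <vector>

    struct UsedMemoryRange {
        uintptr_t start; // inclusive
        uintptr_t end;   // inclusive, matching the patch's <= comparison
    };

    // Returns true when a candidate physical page falls inside any
    // reserved range and must not be handed to the page allocator.
    static bool should_skip_page(uintptr_t page_base,
                                 const std::vector<UsedMemoryRange>& used_ranges)
    {
        for (const auto& range : used_ranges) {
            if (page_base >= range.start && page_base <= range.end)
                return true;
        }
        return false;
    }

    // Usage: while walking the multiboot memory map, pages that survive
    // this check are appended to (or extend) the current PhysicalRegion.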
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index f1028fb47c..a08c255fb6 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -49,6 +49,12 @@ extern FlatPtr start_of_kernel_text;
extern FlatPtr start_of_kernel_data;
extern FlatPtr end_of_kernel_bss;
+extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
+extern size_t multiboot_copy_boot_modules_count;
+
+// Treat the super pages as logically separate from .bss
+__attribute__((section(".super_pages"))) static u8 super_pages[1 * MiB];
+
namespace Kernel {
// NOTE: We can NOT use AK::Singleton for this class, because
@@ -103,14 +109,9 @@ void MemoryManager::protect_kernel_image()
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_writable(false);
}
-
if (Processor::current().has_feature(CPUFeature::NX)) {
- // Disable execution of the kernel data and bss segments, as well as the kernel heap.
- for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
- auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
- pte.set_execute_disabled(true);
- }
- for (size_t i = FlatPtr(kmalloc_start); i < FlatPtr(kmalloc_end); i += PAGE_SIZE) {
+ // Disable execution of the kernel data, bss and heap segments.
+ for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_image; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_execute_disabled(true);
}
@@ -120,14 +121,12 @@ void MemoryManager::protect_kernel_image()
void MemoryManager::parse_memory_map()
{
RefPtr<PhysicalRegion> region;
- bool region_is_super = false;
// We need to make sure we exclude the kmalloc range as well as the kernel image.
// The kmalloc range directly follows the kernel image
const PhysicalAddress used_range_start(virtual_to_low_physical(FlatPtr(&start_of_kernel_image)));
- const PhysicalAddress used_range_end(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(kmalloc_end))));
- klog() << "MM: kernel range: " << used_range_start << " - " << PhysicalAddress(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(&end_of_kernel_image))));
- klog() << "MM: kmalloc range: " << PhysicalAddress(virtual_to_low_physical(FlatPtr(kmalloc_start))) << " - " << used_range_end;
+ const PhysicalAddress used_range_end(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)));
+ klog() << "MM: kernel range: " << used_range_start << " - " << used_range_end;
auto* mmap = (multiboot_memory_map_t*)(low_physical_to_virtual(multiboot_info_ptr->mmap_addr));
for (; (unsigned long)mmap < (low_physical_to_virtual(multiboot_info_ptr->mmap_addr)) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
@@ -161,31 +160,32 @@ void MemoryManager::parse_memory_map()
for (size_t page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
auto addr = PhysicalAddress(page_base);
- if (addr.get() < used_range_end.get() && addr.get() >= used_range_start.get())
+ // Skip used memory ranges.
+ bool should_skip = false;
+ for (auto used_range : m_used_memory_ranges) {
+ if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
+ should_skip = true;
+ break;
+ }
+ }
+ if (should_skip)
continue;
- if (page_base < 7 * MiB) {
- // nothing
- } else if (page_base >= 7 * MiB && page_base < 8 * MiB) {
- if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
- m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
- region = m_super_physical_regions.last();
- region_is_super = true;
- } else {
- region->expand(region->lower(), addr);
- }
+ // Assign page to user physical region.
+ if (region.is_null() || region->upper().offset(PAGE_SIZE) != addr) {
+ m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
+ region = m_user_physical_regions.last();
} else {
- if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
- m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
- region = m_user_physical_regions.last();
- region_is_super = false;
- } else {
- region->expand(region->lower(), addr);
- }
+ region->expand(region->lower(), addr);
}
}
}
+ // Append statically-allocated super physical region.
+ m_super_physical_regions.append(PhysicalRegion::create(
+ PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages))),
+ PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages))))));
+
for (auto& region : m_super_physical_regions) {
m_super_physical_pages += region.finalize_capacity();
klog() << "Super physical region: " << region.lower() << " - " << region.upper();