-rw-r--r--  Kernel/Heap/kmalloc.cpp     | 21
-rw-r--r--  Kernel/Heap/kmalloc.h       |  3
-rw-r--r--  Kernel/VM/MemoryManager.cpp | 58
-rw-r--r--  Kernel/linker.ld            |  7
4 files changed, 45 insertions(+), 44 deletions(-)
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 934fac1a64..8d5fc06cd8 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -186,14 +186,11 @@ struct KmallocGlobalHeap {
 };
 
 static KmallocGlobalHeap* g_kmalloc_global;
+static u8 g_kmalloc_global_heap[sizeof(KmallocGlobalHeap)];
 
-// We need to make sure to not stomp on global variables or other parts
-// of the kernel image!
-extern u32 end_of_kernel_image;
-u8* const kmalloc_start = (u8*)PAGE_ROUND_UP(&end_of_kernel_image);
-u8* const kmalloc_end = kmalloc_start + (ETERNAL_RANGE_SIZE + POOL_SIZE) + sizeof(KmallocGlobalHeap);
-#define ETERNAL_BASE (kmalloc_start + sizeof(KmallocGlobalHeap))
-#define KMALLOC_BASE (ETERNAL_BASE + ETERNAL_RANGE_SIZE)
+// Treat the heap as logically separate from .bss
+__attribute__((section(".heap"))) static u8 kmalloc_eternal_heap[ETERNAL_RANGE_SIZE];
+__attribute__((section(".heap"))) static u8 kmalloc_pool_heap[POOL_SIZE];
 
 static size_t g_kmalloc_bytes_eternal = 0;
 static size_t g_kmalloc_call_count;
@@ -215,13 +212,15 @@ void kmalloc_enable_expand()
 
 void kmalloc_init()
 {
-    memset((void*)KMALLOC_BASE, 0, POOL_SIZE);
-    g_kmalloc_global = new (kmalloc_start) KmallocGlobalHeap(KMALLOC_BASE, POOL_SIZE);
+    // Zero out heap since it's placed after end_of_kernel_bss.
+    memset(kmalloc_eternal_heap, 0, sizeof(kmalloc_eternal_heap));
+    memset(kmalloc_pool_heap, 0, sizeof(kmalloc_pool_heap));
+    g_kmalloc_global = new (g_kmalloc_global_heap) KmallocGlobalHeap(kmalloc_pool_heap, sizeof(kmalloc_pool_heap));
 
     s_lock.initialize();
 
-    s_next_eternal_ptr = (u8*)ETERNAL_BASE;
-    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
+    s_next_eternal_ptr = kmalloc_eternal_heap;
+    s_end_of_eternal_range = s_next_eternal_ptr + sizeof(kmalloc_eternal_heap);
 }
 
 void* kmalloc_eternal(size_t size)
diff --git a/Kernel/Heap/kmalloc.h b/Kernel/Heap/kmalloc.h
index ced23e7146..3cd093844a 100644
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -82,6 +82,3 @@ inline void kfree_aligned(void* ptr)
 }
 
 void kmalloc_enable_expand();
-
-extern u8* const kmalloc_start;
-extern u8* const kmalloc_end;
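The kmalloc.cpp change swaps pointer arithmetic past end_of_kernel_image for plain static arrays: the linker now reserves the storage, a section attribute keeps it logically apart from .bss, and the global heap object is constructed into a static buffer with placement new. A minimal hosted sketch of that pattern, with hypothetical names (DemoHeap, g_demo_pool) standing in for the kernel's types:

#include <cstddef>
#include <cstdio>
#include <new>

struct DemoHeap {
    unsigned char* base;
    std::size_t size;
    DemoHeap(unsigned char* b, std::size_t s) : base(b), size(s) {}
};

// Storage for the heap object itself. alignas matters because placement new
// does not align for you; note the kernel's u8 buffer above carries no
// explicit alignment attribute.
alignas(DemoHeap) static unsigned char g_demo_heap_storage[sizeof(DemoHeap)];

// Backing pool in a named section, mirroring the ".heap" arrays above.
__attribute__((section(".demo_pool"))) static unsigned char g_demo_pool[4096];

static DemoHeap* g_demo_heap;

int main()
{
    // Construct the heap object inside static storage; no allocator is
    // involved, which is the whole point this early in kernel boot.
    g_demo_heap = new (g_demo_heap_storage) DemoHeap(g_demo_pool, sizeof(g_demo_pool));
    std::printf("heap manages %zu bytes at %p\n", g_demo_heap->size, (void*)g_demo_heap->base);
}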
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index f1028fb47c..a08c255fb6 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -49,6 +49,12 @@ extern FlatPtr start_of_kernel_text;
 extern FlatPtr start_of_kernel_data;
 extern FlatPtr end_of_kernel_bss;
 
+extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
+extern size_t multiboot_copy_boot_modules_count;
+
+// Treat the super pages as logically separate from .bss
+__attribute__((section(".super_pages"))) static u8 super_pages[1 * MiB];
+
 namespace Kernel {
 
 // NOTE: We can NOT use AK::Singleton for this class, because
@@ -103,14 +109,10 @@ void MemoryManager::protect_kernel_image()
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
         pte.set_writable(false);
     }
     if (Processor::current().has_feature(CPUFeature::NX)) {
-        // Disable execution of the kernel data and bss segments, as well as the kernel heap.
-        for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
-            auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
-            pte.set_execute_disabled(true);
-        }
-        for (size_t i = FlatPtr(kmalloc_start); i < FlatPtr(kmalloc_end); i += PAGE_SIZE) {
+        // Disable execution of the kernel data, bss and heap segments.
+        for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_image; i += PAGE_SIZE) {
             auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
     }
@@ -120,14 +122,12 @@ void MemoryManager::protect_kernel_image()
 void MemoryManager::parse_memory_map()
 {
     RefPtr<PhysicalRegion> region;
-    bool region_is_super = false;
 
     // We need to make sure we exclude the kmalloc range as well as the kernel image.
     // The kmalloc range directly follows the kernel image
     const PhysicalAddress used_range_start(virtual_to_low_physical(FlatPtr(&start_of_kernel_image)));
-    const PhysicalAddress used_range_end(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(kmalloc_end))));
-    klog() << "MM: kernel range: " << used_range_start << " - " << PhysicalAddress(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(&end_of_kernel_image))));
-    klog() << "MM: kmalloc range: " << PhysicalAddress(virtual_to_low_physical(FlatPtr(kmalloc_start))) << " - " << used_range_end;
+    const PhysicalAddress used_range_end(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)));
+    klog() << "MM: kernel range: " << used_range_start << " - " << used_range_end;
 
     auto* mmap = (multiboot_memory_map_t*)(low_physical_to_virtual(multiboot_info_ptr->mmap_addr));
     for (; (unsigned long)mmap < (low_physical_to_virtual(multiboot_info_ptr->mmap_addr)) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
@@ -161,31 +161,32 @@ void MemoryManager::parse_memory_map()
         for (size_t page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
 
-            if (addr.get() < used_range_end.get() && addr.get() >= used_range_start.get())
+            // Skip used memory ranges.
+            bool should_skip = false;
+            for (auto used_range : m_used_memory_ranges) {
+                if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
+                    should_skip = true;
+                    break;
+                }
+            }
+            if (should_skip)
                 continue;
 
-            if (page_base < 7 * MiB) {
-                // nothing
-            } else if (page_base >= 7 * MiB && page_base < 8 * MiB) {
-                if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
-                    m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
-                    region = m_super_physical_regions.last();
-                    region_is_super = true;
-                } else {
-                    region->expand(region->lower(), addr);
-                }
+            // Assign page to user physical region.
+            if (region.is_null() || region->upper().offset(PAGE_SIZE) != addr) {
+                m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
+                region = m_user_physical_regions.last();
             } else {
-                if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
-                    m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
-                    region = m_user_physical_regions.last();
-                    region_is_super = false;
-                } else {
-                    region->expand(region->lower(), addr);
-                }
+                region->expand(region->lower(), addr);
             }
         }
     }
 
+    // Append statically-allocated super physical region.
+    m_super_physical_regions.append(PhysicalRegion::create(
+        PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages))),
+        PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages))))));
+
     for (auto& region : m_super_physical_regions) {
         m_super_physical_pages += region.finalize_capacity();
         klog() << "Super physical region: " << region.lower() << " - " << region.upper();
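parse_memory_map() now does a single scan: each page is checked against m_used_memory_ranges and either coalesced into the current contiguous region or opens a new one; super pages no longer come from a hardcoded 7-8 MiB window. The shape of that loop, reduced to a standalone sketch (Range and Region are illustrative stand-ins, not the kernel's PhysicalRegion API):

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::uint64_t PAGE_SIZE = 4096;

struct Range  { std::uint64_t start, end; };   // inclusive, like the diff's used ranges
struct Region { std::uint64_t lower, upper; }; // contiguous run of pages

int main()
{
    std::vector<Range> used = { { 0x100000, 0x1fffff } }; // e.g. the kernel image
    std::vector<Region> regions;

    for (std::uint64_t page = 0; page <= 0x300000; page += PAGE_SIZE) {
        // Skip pages inside any used memory range.
        bool should_skip = false;
        for (auto& r : used) {
            if (page >= r.start && page <= r.end) {
                should_skip = true;
                break;
            }
        }
        if (should_skip)
            continue;

        // Start a new region unless this page directly follows the last one,
        // mirroring region->upper().offset(PAGE_SIZE) != addr above.
        if (regions.empty() || regions.back().upper + PAGE_SIZE != page)
            regions.push_back({ page, page });
        else
            regions.back().upper = page;
    }

    for (auto& r : regions)
        std::printf("region: %#llx - %#llx\n",
            (unsigned long long)r.lower, (unsigned long long)r.upper);
}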
diff --git a/Kernel/linker.ld b/Kernel/linker.ld
index 8f8d8f6344..fdb2005ce5 100644
--- a/Kernel/linker.ld
+++ b/Kernel/linker.ld
@@ -44,13 +44,18 @@ SECTIONS
         end_of_kernel_data = .;
     }
 
-    .bss ALIGN(4K) : AT (ADDR(.bss) - 0xc0000000)
+    .bss ALIGN(4K) (NOLOAD) : AT (ADDR(.bss) - 0xc0000000)
     {
         start_of_kernel_bss = .;
         *(page_tables)
         *(COMMON)
         *(.bss)
         end_of_kernel_bss = .;
+
+        . = ALIGN(4K);
+        *(.heap)
+        . = ALIGN(4K);
+        *(.super_pages)
     }
 
     end_of_kernel_image = .;
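The script's hand-placed markers (start_of_kernel_bss, end_of_kernel_image) are consumed from C++ by declaring extern symbols and taking their addresses. On ELF targets, GNU ld and lld will even synthesize such bounds automatically, as __start_<section>/__stop_<section>, for any orphan section whose name is a valid C identifier; a runnable sketch of that idea (the demo_heap section is hypothetical, and unlike the kernel's .heap its name has no dot, which is what makes the automatic symbols possible):

#include <cstddef>
#include <cstdio>

// Reserve storage in a custom section, as the kernel does for its heaps.
// "used" keeps the otherwise-unreferenced array from being discarded.
__attribute__((used, section("demo_heap"))) static unsigned char demo_heap[4096];

// The linker defines these bounds for C-identifier-named sections; only
// their addresses are meaningful, never their values.
extern unsigned char __start_demo_heap[];
extern unsigned char __stop_demo_heap[];

int main()
{
    std::printf("demo_heap: %p - %p (%zu bytes)\n",
        (void*)__start_demo_heap, (void*)__stop_demo_heap,
        (std::size_t)(__stop_demo_heap - __start_demo_heap));
}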