author    Tom <tomut@yahoo.com>                 2020-08-24 16:38:20 -0600
committer Andreas Kling <kling@serenityos.org>  2020-08-25 09:48:48 +0200
commit    ba6e4fb77f00587d2bd57865a00b1a4526684741 (patch)
tree      2d92fa94a996fdfcc70e4c9a59f41fa90c469519
parent    08a569fbe0567eb4d8aebb08d3021c4103684635 (diff)
Kernel: Fix kmalloc memory corruption
Rather than hardcoding where the kmalloc pool should be, place it at the end of the kernel image. This avoids corrupting global variables or other parts of the kernel as the image grows. Fixes #3257
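
The core of the fix is deriving the heap base from a linker-provided end-of-image symbol instead of a hard-coded address. A minimal standalone sketch of that idea, assuming a 4 KiB page size; the typedefs and the PAGE_ROUND_UP definition are restated here for illustration and are not part of this diff:

typedef unsigned int u32;
typedef unsigned char u8;

#define PAGE_SIZE 4096
// Round an address up to the next page boundary (standard bit trick).
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

extern u32 end_of_kernel_image; // provided by the linker script

// The heap starts at the first page boundary past the kernel image, so
// growing the image pushes the heap forward instead of overlapping it.
u8* const kmalloc_start = (u8*)PAGE_ROUND_UP(&end_of_kernel_image);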
-rw-r--r--  Kernel/Heap/kmalloc.cpp      | 20
-rw-r--r--  Kernel/Heap/kmalloc.h        |  3
-rw-r--r--  Kernel/VM/MemoryManager.cpp  | 27
3 files changed, 40 insertions(+), 10 deletions(-)
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 5936b3641b..8cf9f66812 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -40,6 +40,7 @@
#include <Kernel/Scheduler.h>
#include <Kernel/SpinLock.h>
#include <Kernel/StdLib.h>
+#include <Kernel/VM/MemoryManager.h>
#define SANITIZE_KMALLOC
@@ -48,13 +49,18 @@ struct AllocationHeader {
u8 data[0];
};
-#define BASE_PHYSICAL (0xc0000000 + (4 * MiB))
#define CHUNK_SIZE 32
#define POOL_SIZE (3 * MiB)
-
-#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MiB))
#define ETERNAL_RANGE_SIZE (2 * MiB)
+// We need to make sure to not stomp on global variables or other parts
+// of the kernel image!
+extern u32 end_of_kernel_image;
+u8* const kmalloc_start = (u8*)PAGE_ROUND_UP(&end_of_kernel_image);
+u8* const kmalloc_end = kmalloc_start + (ETERNAL_RANGE_SIZE + POOL_SIZE);
+#define ETERNAL_BASE kmalloc_start
+#define KMALLOC_BASE (ETERNAL_BASE + ETERNAL_RANGE_SIZE)
+
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
size_t g_kmalloc_bytes_allocated = 0;
@@ -72,14 +78,14 @@ static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
void kmalloc_init()
{
memset(&alloc_map, 0, sizeof(alloc_map));
- memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
+ memset((void*)KMALLOC_BASE, 0, POOL_SIZE);
s_lock.initialize();
g_kmalloc_bytes_eternal = 0;
g_kmalloc_bytes_allocated = 0;
g_kmalloc_bytes_free = POOL_SIZE;
- s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
+ s_next_eternal_ptr = (u8*)ETERNAL_BASE;
s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
@@ -117,7 +123,7 @@ void* kmalloc_page_aligned(size_t size)
inline void* kmalloc_allocate(size_t first_chunk, size_t chunks_needed)
{
- auto* a = (AllocationHeader*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
+ auto* a = (AllocationHeader*)(KMALLOC_BASE + (first_chunk * CHUNK_SIZE));
u8* ptr = a->data;
a->allocation_size_in_chunks = chunks_needed;
@@ -178,7 +184,7 @@ static inline void kfree_impl(void* ptr)
++g_kfree_call_count;
auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
- FlatPtr start = ((FlatPtr)a - (FlatPtr)BASE_PHYSICAL) / CHUNK_SIZE;
+ FlatPtr start = ((FlatPtr)a - (FlatPtr)KMALLOC_BASE) / CHUNK_SIZE;
Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
bitmap_wrapper.set_range(start, a->allocation_size_in_chunks, false);
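
As an aside, the kfree path above recovers a chunk index purely from pointer arithmetic. Pulled out as a standalone helper it reads as follows; a sketch reusing AllocationHeader, KMALLOC_BASE, CHUNK_SIZE, and FlatPtr from the code above, with a hypothetical helper name:

// Hypothetical helper, equivalent to the computation in kfree_impl():
// step back over the allocation header, then measure the offset from
// the pool base in CHUNK_SIZE units.
static size_t chunk_index_for(void* ptr)
{
    auto* header = (AllocationHeader*)((u8*)ptr - sizeof(AllocationHeader));
    return ((FlatPtr)header - (FlatPtr)KMALLOC_BASE) / CHUNK_SIZE;
}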
diff --git a/Kernel/Heap/kmalloc.h b/Kernel/Heap/kmalloc.h
index cf28535227..75f0fab642 100644
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -61,3 +61,6 @@ inline void* operator new[](size_t, void* p) { return p; }
#endif
return kmalloc_impl(size);
}
+
+extern u8* const kmalloc_start;
+extern u8* const kmalloc_end;
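
Exporting kmalloc_start and kmalloc_end gives the rest of the kernel a way to reason about the heap's bounds. For example, a caller could test whether an address belongs to the heap with a half-open containment check (hypothetical helper, not part of this commit):

#include <Kernel/Heap/kmalloc.h>

// Hypothetical example: does ptr fall inside the exported heap range?
inline bool is_kmalloc_address(const void* ptr)
{
    auto* p = (const u8*)ptr;
    return p >= kmalloc_start && p < kmalloc_end;
}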
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 53716a53e9..4a6055d0d6 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -30,6 +30,7 @@
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/CMOS.h>
#include <Kernel/FileSystem/Inode.h>
+#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Multiboot.h>
#include <Kernel/Process.h>
#include <Kernel/VM/AnonymousVMObject.h>
@@ -44,6 +45,8 @@
//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG
+extern u8* start_of_kernel_image;
+extern u8* end_of_kernel_image;
extern FlatPtr start_of_kernel_text;
extern FlatPtr start_of_kernel_data;
extern FlatPtr end_of_kernel_bss;
@@ -82,11 +85,15 @@ void MemoryManager::protect_kernel_image()
}
if (Processor::current().has_feature(CPUFeature::NX)) {
- // Disable execution of the kernel data and bss segments.
+ // Disable execution of the kernel data and bss segments, as well as the kernel heap.
for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_execute_disabled(true);
}
+ for (size_t i = FlatPtr(kmalloc_start); i < FlatPtr(kmalloc_end); i += PAGE_SIZE) {
+ auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+ pte.set_execute_disabled(true);
+ }
}
}
@@ -95,6 +102,13 @@ void MemoryManager::parse_memory_map()
RefPtr<PhysicalRegion> region;
bool region_is_super = false;
+ // We need to make sure we exclude the kmalloc range as well as the kernel image.
+ // The kmalloc range directly follows the kernel image
+ const PhysicalAddress used_range_start(virtual_to_low_physical(FlatPtr(&start_of_kernel_image)));
+ const PhysicalAddress used_range_end(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(kmalloc_end))));
+ klog() << "MM: kernel range: " << used_range_start << " - " << PhysicalAddress(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(&end_of_kernel_image))));
+ klog() << "MM: kmalloc range: " << PhysicalAddress(virtual_to_low_physical(FlatPtr(kmalloc_start))) << " - " << used_range_end;
+
auto* mmap = (multiboot_memory_map_t*)(low_physical_to_virtual(multiboot_info_ptr->mmap_addr));
for (; (unsigned long)mmap < (low_physical_to_virtual(multiboot_info_ptr->mmap_addr)) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
klog() << "MM: Multiboot mmap: base_addr = " << String::format("0x%08x", mmap->addr) << ", length = " << String::format("0x%08x", mmap->len) << ", type = 0x" << String::format("%x", mmap->type);
@@ -131,6 +145,9 @@ void MemoryManager::parse_memory_map()
for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
auto addr = PhysicalAddress(page_base);
+ if (addr.get() < used_range_end.get() && addr.get() >= used_range_start.get())
+ continue;
+
if (page_base < 7 * MiB) {
// nothing
} else if (page_base >= 7 * MiB && page_base < 8 * MiB) {
@@ -153,11 +170,15 @@ void MemoryManager::parse_memory_map()
}
}
- for (auto& region : m_super_physical_regions)
+ for (auto& region : m_super_physical_regions) {
m_super_physical_pages += region.finalize_capacity();
+ klog() << "Super physical region: " << region.lower() << " - " << region.upper();
+ }
- for (auto& region : m_user_physical_regions)
+ for (auto& region : m_user_physical_regions) {
m_user_physical_pages += region.finalize_capacity();
+ klog() << "User physical region: " << region.lower() << " - " << region.upper();
+ }
ASSERT(m_super_physical_pages > 0);
ASSERT(m_user_physical_pages > 0);
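
For clarity, the skip condition added to the physical-page loop above is a half-open interval test on [used_range_start, used_range_end). Restated as a standalone predicate (a sketch; PhysicalAddress is the kernel's type, the function name is hypothetical):

// Hypothetical restatement of the new skip condition: a physical page
// is unavailable to the allocator if it lies within the range occupied
// by the kernel image and the kmalloc heap that follows it.
static bool is_in_used_range(PhysicalAddress addr, PhysicalAddress start, PhysicalAddress end)
{
    return addr.get() >= start.get() && addr.get() < end.get();
}

With this, the body of the loop would read: if (is_in_used_range(addr, used_range_start, used_range_end)) continue;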