author     Andreas Kling <awesomekling@gmail.com>    2018-12-26 21:31:46 +0100
committer  Andreas Kling <awesomekling@gmail.com>    2018-12-26 21:47:15 +0100
commit     f6179ad9f9ba0277fe5f0767cb0985388bf1ee20 (patch)
tree       a2526b71c855f3d282fbc98eb1f7566bce1ad002 /Kernel/kmalloc.cpp
parent     55c722096da724ba93f0a2c5e1f4cb1f59cf9f0e (diff)
Add slightly better kmalloc_aligned() and kfree_aligned().
Process page directories can now actually be freed. This could definitely be implemented in a nicer, less wasteful way, but this works for now. The spawn stress test can now run for a lot longer but eventually dies due to kmalloc running out of memory.
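The new kmalloc_aligned() in this commit uses the classic over-allocation trick: grab size + alignment + sizeof(void*) bytes, round the returned address up to the next multiple of alignment, and stash the original pointer in the word just below the aligned address so kfree_aligned() can recover it. Below is a minimal userspace sketch of the same technique, using standard malloc()/free() in place of the kernel allocator (the malloc_aligned/free_aligned names are illustrative stand-ins, not from the commit):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Over-allocate, round up to the requested alignment, and hide the
    // original pointer just below the aligned address. Assumes alignment
    // is a nonzero multiple of sizeof(void*), so the hidden word always
    // fits in the gap and stays inside the allocation.
    void* malloc_aligned(size_t size, size_t alignment)
    {
        void* ptr = malloc(size + alignment + sizeof(void*));
        if (!ptr)
            return nullptr;
        uintptr_t max_addr = (uintptr_t)ptr + alignment;
        void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
        ((void**)aligned_ptr)[-1] = ptr; // recovered by free_aligned()
        return aligned_ptr;
    }

    void free_aligned(void* ptr)
    {
        free(((void**)ptr)[-1]);
    }

    int main()
    {
        void* p = malloc_aligned(123, 4096);
        printf("%p is page-aligned: %d\n", p, (int)((uintptr_t)p % 4096 == 0));
        free_aligned(p);
    }

Since kmalloc() hands back word-aligned chunks, the rounded-up address lands at least sizeof(void*) bytes past the original pointer, which is what makes the [-1] slot safe to write.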
Diffstat (limited to 'Kernel/kmalloc.cpp')
-rw-r--r--  Kernel/kmalloc.cpp  34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/Kernel/kmalloc.cpp b/Kernel/kmalloc.cpp
index 09508bc75e..aae3536720 100644
--- a/Kernel/kmalloc.cpp
+++ b/Kernel/kmalloc.cpp
@@ -32,20 +32,14 @@ static byte alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;
-volatile size_t kmalloc_sum_page_aligned = 0;
static byte* s_next_eternal_ptr;
-static byte* s_next_page_aligned_ptr;
-
static byte* s_end_of_eternal_range;
-static byte* s_end_of_page_aligned_range;
bool is_kmalloc_address(void* ptr)
{
if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
return true;
- if (ptr >= (byte*)PAGE_ALIGNED_BASE_PHYSICAL && ptr < s_next_page_aligned_ptr)
- return true;
return (dword)ptr >= BASE_PHYS && (dword)ptr <= (BASE_PHYS + POOL_SIZE);
}
@@ -55,15 +49,11 @@ void kmalloc_init()
memset( (void *)BASE_PHYS, 0, POOL_SIZE );
kmalloc_sum_eternal = 0;
- kmalloc_sum_page_aligned = 0;
sum_alloc = 0;
sum_free = POOL_SIZE;
s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
- s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
-
s_end_of_eternal_range = s_next_eternal_ptr + RANGE_SIZE;
- s_end_of_page_aligned_range = s_next_page_aligned_ptr + RANGE_SIZE;
}
void* kmalloc_eternal(size_t size)
@@ -75,17 +65,29 @@ void* kmalloc_eternal(size_t size)
return ptr;
}
+void* kmalloc_aligned(size_t size, size_t alignment)
+{
+ void* ptr = kmalloc(size + alignment + sizeof(void*));
+ dword max_addr = (dword)ptr + alignment;
+ void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
+
+ ((void**)aligned_ptr)[-1] = ptr;
+ return aligned_ptr;
+}
+
+void kfree_aligned(void* ptr)
+{
+ kfree(((void**)ptr)[-1]);
+}
+
void* kmalloc_page_aligned(size_t size)
{
- ASSERT((size % PAGE_SIZE) == 0);
- void* ptr = s_next_page_aligned_ptr;
- s_next_page_aligned_ptr += size;
- ASSERT(s_next_page_aligned_ptr < s_end_of_page_aligned_range);
- kmalloc_sum_page_aligned += size;
+ void* ptr = kmalloc_aligned(size, PAGE_SIZE);
+ dword d = (dword)ptr;
+ ASSERT((d & PAGE_MASK) == d);
return ptr;
}
-
void* kmalloc(dword size)
{
InterruptDisabler disabler;
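With the dedicated page-aligned range gone, kmalloc_page_aligned() is now just kmalloc_aligned(size, PAGE_SIZE) plus an assertion that the result really is page-aligned. A quick illustration of both the assertion and the cost the commit message concedes (assuming 4 KiB pages and a 32-bit PAGE_MASK of 0xfffff000 as on i386; on a 64-bit host sizeof(void*) differs from the kernel's 4 bytes):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t PAGE_SIZE = 4096;
        const uint32_t PAGE_MASK = 0xfffff000; // assumption: 32-bit, 4 KiB pages

        // A page-aligned address has its low 12 bits clear, so masking it
        // is a no-op; that is what ASSERT((d & PAGE_MASK) == d) verifies.
        uint32_t aligned = 0x00123000;
        assert((aligned & PAGE_MASK) == aligned);
        uint32_t unaligned = 0x00123010;
        assert((unaligned & PAGE_MASK) != unaligned);

        // Cost of the over-allocation trick: one page of payload now takes
        // PAGE_SIZE + alignment + sizeof(void*) bytes out of the pool,
        // which is the "wasteful" part the commit message mentions.
        printf("4096-byte page-aligned allocation consumes %u bytes\n",
               PAGE_SIZE + PAGE_SIZE + (uint32_t)sizeof(void*));
    }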