author     Andreas Kling <awesomekling@gmail.com>  2019-05-14 16:38:06 +0200
committer  Andreas Kling <awesomekling@gmail.com>  2019-05-14 16:38:06 +0200
commit     5f7bb9d072731468d50e4943f85eb02e5d7f9f03 (patch)
tree       1c73c93813affa6c9664a78665bb19dca16f1cec /LibC
parent     1e0f9d325c8cf99df8b8f321a207bcbdf782209b (diff)
download   serenity-5f7bb9d072731468d50e4943f85eb02e5d7f9f03.zip
malloc: Make it possible to recycle big allocation blocks as well.
This patch makes us recycle up to 8 blocks of 4KB size. This should probably be extended to handle other sizes.
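The scheme is simple: instead of returning a freed 4KB BigAllocationBlock to the kernel, free() parks it on a small per-size free list, and malloc() takes a parked block off that list before falling back to a fresh os_alloc(). Below is a minimal standalone sketch of the idea, not the patch itself; std::vector and mmap/munmap are stand-ins for AK::Vector and the allocator's os_alloc()/os_free() helpers.

// Minimal sketch of big-block recycling (illustration only, not SerenityOS code).
// Assumptions: std::vector stands in for AK::Vector, and mmap()/munmap()
// stand in for the allocator's os_alloc()/os_free() helpers.
#include <sys/mman.h>
#include <cstddef>
#include <vector>

static const std::size_t block_size = 4096;
static const std::size_t blocks_to_keep = 8;
static std::vector<void*> s_recycled_blocks; // parked 4KB blocks

void* alloc_big_block()
{
    // Reuse a parked block if one is available (LIFO, like take_last()).
    if (!s_recycled_blocks.empty()) {
        void* block = s_recycled_blocks.back();
        s_recycled_blocks.pop_back();
        return block;
    }
    // Otherwise ask the kernel for a fresh page.
    void* block = mmap(nullptr, block_size, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    return block == MAP_FAILED ? nullptr : block;
}

void free_big_block(void* block)
{
    // Keep up to `blocks_to_keep` blocks around instead of unmapping.
    if (s_recycled_blocks.size() < blocks_to_keep) {
        s_recycled_blocks.push_back(block);
        return;
    }
    munmap(block, block_size);
}

In the patch itself, a recycled block keeps its BigAllocationBlock header intact from its first initialization, so malloc() can hand out &block->m_slot[0] directly without re-running set_mmap_name() or the placement-new constructor.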
Diffstat (limited to 'LibC')
-rw-r--r--  LibC/malloc.cpp | 31 +++++++++++++++++++++++++++++++++++++--
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/LibC/malloc.cpp b/LibC/malloc.cpp
index d79a918474..a2e91fa854 100644
--- a/LibC/malloc.cpp
+++ b/LibC/malloc.cpp
@@ -1,5 +1,6 @@
 #include <AK/Bitmap.h>
 #include <AK/InlineLinkedList.h>
+#include <AK/Vector.h>
 #include <sys/mman.h>
 #include <stdlib.h>
 #include <assert.h>
@@ -16,6 +17,7 @@
 #define PAGE_ROUND_UP(x) ((((size_t)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
 
 static const size_t number_of_chunked_blocks_to_keep_around_per_size_class = 32;
+static const size_t number_of_big_blocks_to_keep_around_per_size_class = 8;
 
 static bool s_log_malloc = false;
 static bool s_scrub_malloc = true;
@@ -82,7 +84,12 @@ struct Allocator {
     InlineLinkedList<ChunkedBlock> full_blocks;
 };
 
+struct BigAllocator {
+    Vector<BigAllocationBlock*> blocks;
+};
+
 static Allocator g_allocators[num_size_classes];
+static BigAllocator g_big_allocators[1];
 
 static Allocator* allocator_for_size(size_t size, size_t& good_size)
 {
@@ -96,6 +103,14 @@ static Allocator* allocator_for_size(size_t size, size_t& good_size)
     return nullptr;
 }
 
+static BigAllocator* big_allocator_for_size(size_t size)
+{
+    if (size == 4096)
+        return &g_big_allocators[0];
+    return nullptr;
+}
+
+
 extern "C" {
 
 size_t malloc_good_size(size_t size)
@@ -130,10 +145,16 @@ void* malloc(size_t size)
     auto* allocator = allocator_for_size(size, good_size);
 
     if (!allocator) {
-        size_t real_size = sizeof(BigAllocationBlock) + size;
+        size_t real_size = PAGE_ROUND_UP(sizeof(BigAllocationBlock) + size);
+        if (auto* allocator = big_allocator_for_size(real_size)) {
+            if (!allocator->blocks.is_empty()) {
+                auto* block = allocator->blocks.take_last();
+                return &block->m_slot[0];
+            }
+        }
         auto* block = (BigAllocationBlock*)os_alloc(real_size);
         char buffer[64];
-        snprintf(buffer, sizeof(buffer), "malloc: BigAllocationBlock(%u)", good_size);
+        snprintf(buffer, sizeof(buffer), "malloc: BigAllocationBlock(%u)", real_size);
         set_mmap_name(block, PAGE_SIZE, buffer);
         new (block) BigAllocationBlock(real_size);
         return &block->m_slot[0];
@@ -184,6 +205,12 @@ void free(void* ptr)
 
     if (magic == MAGIC_BIGALLOC_HEADER) {
         auto* block = (BigAllocationBlock*)page_base;
+        if (auto* allocator = big_allocator_for_size(block->m_size)) {
+            if (allocator->blocks.size() < number_of_big_blocks_to_keep_around_per_size_class) {
+                allocator->blocks.append(block);
+                return;
+            }
+        }
         os_free(block, block->m_size);
         return;
     }
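A hypothetical way to observe the new behavior from userland follows. This test program is not part of the patch, and the request size of 4000 bytes is an assumption: it must miss every chunked size class yet be small enough that sizeof(BigAllocationBlock) + size still rounds up to a single 4096-byte block.

// Hypothetical demo, not part of the patch.
#include <cstdio>
#include <cstdlib>

int main()
{
    // Assumption: 4000 bytes reaches the BigAllocationBlock path.
    void* a = malloc(4000);
    free(a);                // parked on the big-block free list, not munmap'ed
    void* b = malloc(4000); // take_last() should hand back the same block
    printf("a=%p b=%p\n", a, b); // expected to print identical addresses
    free(b);
    return 0;
}

Because take_last() pops from the tail of the same Vector that free() appends to, a free immediately followed by an equal-sized big allocation should observe the same address, which is a cheap way to confirm recycling is active.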