author     Andreas Kling <awesomekling@gmail.com>    2019-05-02 17:06:05 +0200
committer  Andreas Kling <awesomekling@gmail.com>    2019-05-02 17:06:05 +0200
commit     b4e7925e31c34e4751dfe560ab63532cbb225f6c (patch)
tree       fd649f97fa365c79978d9fc58fb8cac10b4a16c5 /LibC
parent     60023ff70ba34bc8c8d31d483b2a1e6db51af1a4 (diff)
LibC: Move full ChunkedBlocks to a separate list in the allocator.
This way we only check actually usable blocks when trying to satisfy a new allocation request.
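For context, the bookkeeping this commit introduces can be sketched standalone. A minimal sketch, using std::list as a stand-in for LibC's InlineLinkedList; the Block and Pool names are illustrative, not the actual ChunkedBlock/Allocator types:

// Illustrative sketch of the two-list scheme (not the actual LibC types).
#include <cstddef>
#include <list>

struct Block {
    std::size_t free_chunks { 0 };
    std::size_t capacity { 0 };
    bool is_full() const { return free_chunks == 0; }
};

struct Pool {
    std::list<Block*> usable_blocks; // every block here has at least one free chunk
    std::list<Block*> full_blocks;   // never scanned on the allocation path

    void allocate_chunk_from(Block* block)
    {
        --block->free_chunks;
        if (block->is_full()) {
            // The block can no longer satisfy requests; park it so the
            // allocation scan never has to look at it again.
            usable_blocks.remove(block);
            full_blocks.push_back(block);
        }
    }

    void free_chunk_into(Block* block)
    {
        if (block->is_full()) {
            // The block is about to gain a free chunk; make it usable again.
            full_blocks.remove(block);
            usable_blocks.push_front(block); // prepend so it's tried first
        }
        ++block->free_chunks;
    }
};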
Diffstat (limited to 'LibC')
-rw-r--r--  LibC/malloc.cpp  37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/LibC/malloc.cpp b/LibC/malloc.cpp
index 38feddd216..633ec71f3f 100644
--- a/LibC/malloc.cpp
+++ b/LibC/malloc.cpp
@@ -68,6 +68,7 @@ struct ChunkedBlock : public CommonHeader, public InlineLinkedListNode<ChunkedBl
{
return &m_slot[index * m_size];
}
+ bool is_full() const { return m_free_chunks == 0; }
size_t bytes_per_chunk() const { return m_size; }
size_t free_chunks() const { return m_free_chunks; }
size_t used_chunks() const { return chunk_capacity() - m_free_chunks; }
@@ -77,7 +78,8 @@ struct ChunkedBlock : public CommonHeader, public InlineLinkedListNode<ChunkedBl
struct Allocator {
size_t size { 0 };
size_t block_count { 0 };
- InlineLinkedList<ChunkedBlock> blocks;
+ InlineLinkedList<ChunkedBlock> usable_blocks;
+ InlineLinkedList<ChunkedBlock> full_blocks;
};
static Allocator g_allocators[num_size_classes];
@@ -139,7 +141,7 @@ void* malloc(size_t size)
ChunkedBlock* block = nullptr;
- for (block = allocator->blocks.head(); block; block = block->next()) {
+ for (block = allocator->usable_blocks.head(); block; block = block->next()) {
if (block->free_chunks())
break;
}
@@ -150,21 +152,26 @@ void* malloc(size_t size)
snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%u)", good_size);
set_mmap_name(block, PAGE_SIZE, buffer);
new (block) ChunkedBlock(good_size);
- allocator->blocks.append(block);
+ allocator->usable_blocks.append(block);
++allocator->block_count;
}
--block->m_free_chunks;
void* ptr = block->m_freelist;
block->m_freelist = block->m_freelist->next;
+ if (block->is_full()) {
#ifdef MALLOC_DEBUG
- dbgprintf("LibC: allocated %p (chunk %d in allocator %p, size %u)\n", ptr, index, page, page->bytes_per_chunk());
+ dbgprintf("Block %p is now full in size class %u\n", block, good_size);
+#endif
+ allocator->usable_blocks.remove(block);
+ allocator->full_blocks.append(block);
+ }
+#ifdef MALLOC_DEBUG
+ dbgprintf("LibC: allocated %p (chunk %d in block %p, size %u)\n", ptr, index, block, block->bytes_per_chunk());
#endif
if (s_scrub_malloc)
memset(ptr, MALLOC_SCRUB_BYTE, block->m_size);
return ptr;
-
- ASSERT_NOT_REACHED();
}
void free(void* ptr)
@@ -195,6 +202,16 @@ void free(void* ptr)
entry->next = block->m_freelist;
block->m_freelist = entry;
+ if (block->is_full()) {
+ size_t good_size;
+ auto* allocator = allocator_for_size(block->m_size, good_size);
+#ifdef MALLOC_DEBUG
+ dbgprintf("Block %p no longer full in size class %u\n", block, good_size);
+#endif
+ allocator->full_blocks.remove(block);
+ allocator->usable_blocks.prepend(block);
+ }
+
++block->m_free_chunks;
if (!block->used_chunks()) {
@@ -204,19 +221,19 @@ void free(void* ptr)
#ifdef MALLOC_DEBUG
dbgprintf("Keeping block %p around for size class %u\n", block, good_size);
#endif
- if (allocator->blocks.tail() != block) {
+ if (allocator->usable_blocks.tail() != block) {
#ifdef MALLOC_DEBUG
dbgprintf("Moving block %p to tail of list for size class %u\n", block, good_size);
#endif
- allocator->blocks.remove(block);
- allocator->blocks.append(block);
+ allocator->usable_blocks.remove(block);
+ allocator->usable_blocks.append(block);
}
return;
}
#ifdef MALLOC_DEBUG
dbgprintf("Releasing block %p for size class %u\n", block, good_size);
#endif
- allocator->blocks.remove(block);
+ allocator->usable_blocks.remove(block);
--allocator->block_count;
os_free(block, PAGE_SIZE);
}
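A hypothetical walkthrough of the resulting invariant (a block lives in exactly one of the two lists, chosen by whether it still has free chunks), reusing the illustrative Pool sketch above:

int main()
{
    Pool pool;
    Block block { /* free_chunks */ 1, /* capacity */ 1 };
    pool.usable_blocks.push_back(&block);

    pool.allocate_chunk_from(&block); // last chunk taken -> parked in full_blocks
    // The allocation scan now skips this block entirely.

    pool.free_chunk_into(&block);     // a chunk returns -> prepended to usable_blocks
    // The next request in this size class finds it first.
}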