author    Brian Gianforcaro <bgianf@serenityos.org> 2021-06-03 02:23:11 -0700
committer Andreas Kling <kling@serenityos.org> 2021-06-03 13:27:40 +0200
commit    e37f39d980d14757a4d263990a69cd450218074e (patch)
tree      1445ccb9e57b61ca997713f2e19c0aa0aa1c90c5 /Userland/Libraries/LibC
parent    48da8a568d80fe4a1bf72d4c3fdfe0f3ed5a8c84 (diff)
LibC: Switch ChunkedBlock to IntrusiveList from InlineLinkedList
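
AK::InlineLinkedList requires the element type to inherit from InlineLinkedListNode<T> and to carry its own m_prev/m_next pointers; AK::IntrusiveList instead embeds an IntrusiveListNode member inside the element and names the list through a type alias, which is exactly what the mallocdefs.h hunk below does for ChunkedBlock. A minimal sketch of the pattern on a toy element type (Item and its fields are illustrative, not part of this patch; the template arguments mirror the alias added in mallocdefs.h):

    #include <AK/IntrusiveList.h>
    #include <AK/Types.h>

    struct Item {
        size_t value { 0 };
        // The list hooks live inside the element itself; no base class
        // and no hand-rolled m_prev/m_next pointers are needed.
        IntrusiveListNode<Item> m_list_node;
        using List = IntrusiveList<Item, RawPtr<Item>, &Item::m_list_node>;
    };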
Diffstat (limited to 'Userland/Libraries/LibC')
-rw-r--r-- Userland/Libraries/LibC/malloc.cpp   32
-rw-r--r-- Userland/Libraries/LibC/mallocdefs.h 11
2 files changed, 21 insertions, 22 deletions
diff --git a/Userland/Libraries/LibC/malloc.cpp b/Userland/Libraries/LibC/malloc.cpp
index 3331537293..a88d1e4cb4 100644
--- a/Userland/Libraries/LibC/malloc.cpp
+++ b/Userland/Libraries/LibC/malloc.cpp
@@ -5,7 +5,6 @@
*/
#include <AK/Debug.h>
-#include <AK/InlineLinkedList.h>
#include <AK/ScopedValueRollback.h>
#include <AK/Vector.h>
#include <LibELF/AuxiliaryVector.h>
@@ -100,8 +99,8 @@ static ChunkedBlock* s_cold_empty_blocks[number_of_cold_chunked_blocks_to_keep_a
struct Allocator {
size_t size { 0 };
size_t block_count { 0 };
- InlineLinkedList<ChunkedBlock> usable_blocks;
- InlineLinkedList<ChunkedBlock> full_blocks;
+ ChunkedBlock::List usable_blocks;
+ ChunkedBlock::List full_blocks;
};
struct BigAllocator {
@@ -221,10 +220,11 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
}
ChunkedBlock* block = nullptr;
-
- for (block = allocator->usable_blocks.head(); block; block = block->next()) {
- if (block->free_chunks())
+ for (auto& current : allocator->usable_blocks) {
+ if (current.free_chunks()) {
+ block = &current;
break;
+ }
}
if (!block && s_hot_empty_block_count) {
@@ -237,7 +237,7 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
set_mmap_name(block, ChunkedBlock::block_size, buffer);
}
- allocator->usable_blocks.append(block);
+ allocator->usable_blocks.append(*block);
}
if (!block && s_cold_empty_block_count) {
@@ -260,7 +260,7 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
new (block) ChunkedBlock(good_size);
ue_notify_chunk_size_changed(block, good_size);
}
- allocator->usable_blocks.append(block);
+ allocator->usable_blocks.append(*block);
}
if (!block) {
@@ -269,7 +269,7 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
block = (ChunkedBlock*)os_alloc(ChunkedBlock::block_size, buffer);
new (block) ChunkedBlock(good_size);
- allocator->usable_blocks.append(block);
+ allocator->usable_blocks.append(*block);
++allocator->block_count;
}
@@ -285,8 +285,8 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
if (block->is_full()) {
g_malloc_stats.number_of_blocks_full++;
dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);
- allocator->usable_blocks.remove(block);
- allocator->full_blocks.append(block);
+ allocator->usable_blocks.remove(*block);
+ allocator->full_blocks.append(*block);
}
dbgln_if(MALLOC_DEBUG, "LibC: allocated {:p} (chunk in block {:p}, size {})", ptr, block, block->bytes_per_chunk());
@@ -353,8 +353,8 @@ static void free_impl(void* ptr)
auto* allocator = allocator_for_size(block->m_size, good_size);
dbgln_if(MALLOC_DEBUG, "Block {:p} no longer full in size class {}", block, good_size);
g_malloc_stats.number_of_freed_full_blocks++;
- allocator->full_blocks.remove(block);
- allocator->usable_blocks.prepend(block);
+ allocator->full_blocks.remove(*block);
+ allocator->usable_blocks.prepend(*block);
}
++block->m_free_chunks;
@@ -365,14 +365,14 @@ static void free_impl(void* ptr)
if (s_hot_empty_block_count < number_of_hot_chunked_blocks_to_keep_around) {
dbgln_if(MALLOC_DEBUG, "Keeping hot block {:p} around", block);
g_malloc_stats.number_of_hot_keeps++;
- allocator->usable_blocks.remove(block);
+ allocator->usable_blocks.remove(*block);
s_hot_empty_blocks[s_hot_empty_block_count++] = block;
return;
}
if (s_cold_empty_block_count < number_of_cold_chunked_blocks_to_keep_around) {
dbgln_if(MALLOC_DEBUG, "Keeping cold block {:p} around", block);
g_malloc_stats.number_of_cold_keeps++;
- allocator->usable_blocks.remove(block);
+ allocator->usable_blocks.remove(*block);
s_cold_empty_blocks[s_cold_empty_block_count++] = block;
mprotect(block, ChunkedBlock::block_size, PROT_NONE);
madvise(block, ChunkedBlock::block_size, MADV_SET_VOLATILE);
@@ -380,7 +380,7 @@ static void free_impl(void* ptr)
}
dbgln_if(MALLOC_DEBUG, "Releasing block {:p} for size class {}", block, good_size);
g_malloc_stats.number_of_frees++;
- allocator->usable_blocks.remove(block);
+ allocator->usable_blocks.remove(*block);
--allocator->block_count;
os_free(block, ChunkedBlock::block_size);
}
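
Note the shape of every call-site change above: IntrusiveList's append(), prepend(), and remove() take the element by reference, so each usable_blocks.append(block) becomes append(*block), and the old head()/next() pointer walk becomes a range-for loop yielding references. A hedged sketch of the resulting call-site pattern, reusing the illustrative Item type from the note above:

    Item::List list;
    Item a;
    list.append(a);              // element passed by reference, not Item*
    Item* found = nullptr;
    for (auto& current : list) { // iteration yields Item&
        if (current.value > 0) {
            found = &current;
            break;
        }
    }
    if (found)
        list.remove(*found);     // removal also takes a reference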
diff --git a/Userland/Libraries/LibC/mallocdefs.h b/Userland/Libraries/LibC/mallocdefs.h
index ebf90eeaef..991c4b003b 100644
--- a/Userland/Libraries/LibC/mallocdefs.h
+++ b/Userland/Libraries/LibC/mallocdefs.h
@@ -6,7 +6,7 @@
#pragma once
-#include <AK/InlineLinkedList.h>
+#include <AK/IntrusiveList.h>
#include <AK/Types.h>
#define MAGIC_PAGE_HEADER 0x42657274 // 'Bert'
@@ -45,9 +45,7 @@ struct FreelistEntry {
FreelistEntry* next;
};
-struct ChunkedBlock
- : public CommonHeader
- , public InlineLinkedListNode<ChunkedBlock> {
+struct ChunkedBlock : public CommonHeader {
static constexpr size_t block_size = 64 * KiB;
static constexpr size_t block_mask = ~(block_size - 1);
@@ -59,8 +57,7 @@ struct ChunkedBlock
m_free_chunks = chunk_capacity();
}
- ChunkedBlock* m_prev { nullptr };
- ChunkedBlock* m_next { nullptr };
+ IntrusiveListNode<ChunkedBlock> m_list_node;
size_t m_next_lazy_freelist_index { 0 };
FreelistEntry* m_freelist { nullptr };
size_t m_free_chunks { 0 };
@@ -75,4 +72,6 @@ struct ChunkedBlock
size_t free_chunks() const { return m_free_chunks; }
size_t used_chunks() const { return chunk_capacity() - m_free_chunks; }
size_t chunk_capacity() const { return (block_size - sizeof(ChunkedBlock)) / m_size; }
+
+ using List = IntrusiveList<ChunkedBlock, RawPtr<ChunkedBlock>, &ChunkedBlock::m_list_node>;
};
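
Taken together, the end state of the data structure is roughly the following (a condensed sketch of the two files above; CommonHeader, the freelist fields, and the remaining accessors are elided):

    #include <AK/IntrusiveList.h>
    #include <AK/Types.h>

    struct ChunkedBlock {
        IntrusiveListNode<ChunkedBlock> m_list_node;
        size_t m_free_chunks { 0 };
        size_t free_chunks() const { return m_free_chunks; }
        using List = IntrusiveList<ChunkedBlock, RawPtr<ChunkedBlock>, &ChunkedBlock::m_list_node>;
    };

    struct Allocator {
        size_t block_count { 0 };
        ChunkedBlock::List usable_blocks;
        ChunkedBlock::List full_blocks;
    };

    // Moving a block that just became full, as malloc_impl now does:
    //     allocator->usable_blocks.remove(*block);
    //     allocator->full_blocks.append(*block);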