author     Gunnar Beutner <gbeutner@serenityos.org>   2021-05-05 05:02:36 +0200
committer  Andreas Kling <kling@serenityos.org>       2021-05-06 10:38:46 +0200
commit     343882943166abf4542a0e6643baf9d80806e944
tree       2f80023291fb1ba4c4284f534e155ebb32f41c62
parent     3aaffa2c47e39583848cfe9b7b9f846e5119fe1b
LibC: Lazily initialize malloc chunks
By default, malloc manages memory internally in larger blocks. When
one of those blocks is added, we initialize a free list by touching
each of the new block's pages, thereby committing all of that memory
up front.
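
As a rough sketch of that eager scheme (a simplified stand-in model, not
the actual LibC structures; FreelistEntry, chunk() and chunk_capacity()
mirror the names in the diff below, everything else here is hypothetical):

    #include <cstddef>

    struct FreelistEntry {
        FreelistEntry* next;
    };

    struct Block {
        size_t chunk_size { 0 };
        size_t chunk_count { 0 };
        unsigned char* slots { nullptr }; // start of the chunk area
        FreelistEntry* freelist { nullptr };

        unsigned char* chunk(size_t i) { return slots + i * chunk_size; }

        // Eager scheme: thread a freelist through every chunk right away.
        // Each write faults in (and thereby commits) the page behind it.
        void build_freelist_eagerly()
        {
            freelist = reinterpret_cast<FreelistEntry*>(chunk(0));
            for (size_t i = 0; i < chunk_count; ++i) {
                auto* entry = reinterpret_cast<FreelistEntry*>(chunk(i));
                entry->next = (i + 1 < chunk_count)
                    ? reinterpret_cast<FreelistEntry*>(chunk(i + 1))
                    : nullptr;
            }
        }
    };
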
This commit changes malloc to build the free list on demand, which as
a bonus also distributes the latency hit for new blocks more evenly,
because the page faults for the zero pages no longer all happen at
once.
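
A minimal sketch of the on-demand scheme, using the same stand-in model
as above (next_lazy_freelist_index corresponds to m_next_lazy_freelist_index
in the patch; bounds checking against the free-chunk count is left out):

    #include <cstddef>

    struct FreelistEntry {
        FreelistEntry* next;
    };

    struct Block {
        size_t chunk_size { 0 };
        size_t chunk_count { 0 };
        size_t next_lazy_freelist_index { 0 }; // first chunk never handed out yet
        unsigned char* slots { nullptr };
        FreelistEntry* freelist { nullptr };   // only holds chunks that were freed

        // Lazy scheme: prefer recycled chunks; otherwise carve out the next
        // untouched chunk. Its zero page is only faulted in once the caller
        // writes to it, so the cost is spread across allocations.
        void* allocate_chunk()
        {
            if (freelist) {
                void* ptr = freelist;
                freelist = freelist->next;
                return ptr;
            }
            void* ptr = slots + next_lazy_freelist_index * chunk_size;
            ++next_lazy_freelist_index;
            return ptr;
        }
    };

Note that in the patch below the free-chunk counter is still decremented
up front, so fullness tracking is unaffected by the lazy path.
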
-rw-r--r--  Userland/Libraries/LibC/malloc.cpp     | 7
-rw-r--r--  Userland/Libraries/LibC/mallocdefs.h   | 9
2 files changed, 7 insertions, 9 deletions
diff --git a/Userland/Libraries/LibC/malloc.cpp b/Userland/Libraries/LibC/malloc.cpp
index 6b3601ca64..416a4b659b 100644
--- a/Userland/Libraries/LibC/malloc.cpp
+++ b/Userland/Libraries/LibC/malloc.cpp
@@ -244,8 +244,13 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
     --block->m_free_chunks;
     void* ptr = block->m_freelist;
+    if (ptr) {
+        block->m_freelist = block->m_freelist->next;
+    } else {
+        ptr = block->m_slot + block->m_next_lazy_freelist_index * block->m_size;
+        block->m_next_lazy_freelist_index++;
+    }
     VERIFY(ptr);
-    block->m_freelist = block->m_freelist->next;
     if (block->is_full()) {
         g_malloc_stats.number_of_blocks_full++;
         dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);
diff --git a/Userland/Libraries/LibC/mallocdefs.h b/Userland/Libraries/LibC/mallocdefs.h
index 71223b2822..ebf90eeaef 100644
--- a/Userland/Libraries/LibC/mallocdefs.h
+++ b/Userland/Libraries/LibC/mallocdefs.h
@@ -57,18 +57,11 @@ struct ChunkedBlock
         m_magic = MAGIC_PAGE_HEADER;
         m_size = bytes_per_chunk;
         m_free_chunks = chunk_capacity();
-        m_freelist = (FreelistEntry*)chunk(0);
-        for (size_t i = 0; i < chunk_capacity(); ++i) {
-            auto* entry = (FreelistEntry*)chunk(i);
-            if (i != chunk_capacity() - 1)
-                entry->next = (FreelistEntry*)chunk(i + 1);
-            else
-                entry->next = nullptr;
-        }
     }
 
     ChunkedBlock* m_prev { nullptr };
     ChunkedBlock* m_next { nullptr };
+    size_t m_next_lazy_freelist_index { 0 };
     FreelistEntry* m_freelist { nullptr };
     size_t m_free_chunks { 0 };
     [[gnu::aligned(8)]] unsigned char m_slot[0];