author    Andreas Kling <kling@serenityos.org>  2022-12-04 19:04:39 +0100
committer Andreas Kling <kling@serenityos.org>  2022-12-05 10:29:18 +0100
commit    d8a3e2fc4eff03a47a428dc0c4c8d39e50f74c97 (patch)
tree      20b369ad8f91250d10e53cd2f6abf839899dd7d9 /Kernel/Heap
parent    3f38f610439f60bceafd718855d4fe6c259198fb (diff)
download  serenity-d8a3e2fc4eff03a47a428dc0c4c8d39e50f74c97.zip
Kernel: Don't memset() allocated memory twice in kcalloc()
This patch adds a way to ask the allocator to skip its internal scrubbing memset operation. Before this change, kcalloc() would scrub twice: once internally in kmalloc() and then again in kcalloc().

The same mechanism already existed in LibC malloc, and this patch brings it over to the kernel heap allocator as well.

This solves one FIXME in kcalloc(). :^)
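For readers outside the kernel tree, the shape of the change can be boiled down to a small, self-contained sketch. Everything below (TOY_SCRUB_BYTE, toy_allocate, toy_calloc) is illustrative naming, not SerenityOS code; it only mirrors the pattern the patch introduces: the allocator scrubs a fresh block with a debug byte unless the caller declares that it will initialize the memory itself, as kcalloc() does with its zero fill.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

enum class CallerWillInitializeMemory {
    No,
    Yes,
};

static constexpr unsigned char TOY_SCRUB_BYTE = 0xBB; // stand-in for KMALLOC_SCRUB_BYTE

// Allocate `size` bytes; scrub with the debug byte only if the caller
// is not going to overwrite the block itself.
static void* toy_allocate(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
{
    void* ptr = malloc(size);
    if (!ptr)
        return nullptr;
    if (caller_will_initialize_memory == CallerWillInitializeMemory::No)
        memset(ptr, TOY_SCRUB_BYTE, size);
    return ptr;
}

// calloc()-style wrapper: it zero-fills anyway, so it tells the allocator
// to skip the scrub and the block is written exactly once.
static void* toy_calloc(size_t count, size_t size)
{
    size_t new_size = count * size; // the real kcalloc() checks for overflow first
    void* ptr = toy_allocate(new_size, CallerWillInitializeMemory::Yes);
    if (ptr)
        memset(ptr, 0, new_size);
    return ptr;
}

int main()
{
    auto* scrubbed = static_cast<unsigned char*>(toy_allocate(8, CallerWillInitializeMemory::No));
    auto* zeroed = static_cast<unsigned char*>(toy_calloc(4, 2));
    printf("scrubbed: %02x, zeroed: %02x\n", scrubbed[0], zeroed[0]); // prints "scrubbed: bb, zeroed: 00"
    free(scrubbed);
    free(zeroed);
    return 0;
}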
Diffstat (limited to 'Kernel/Heap')
-rw-r--r--  Kernel/Heap/Heap.h      | 13
-rw-r--r--  Kernel/Heap/kmalloc.cpp | 28
2 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/Kernel/Heap/Heap.h b/Kernel/Heap/Heap.h
index 5724d7e918..1ccf4edc3a 100644
--- a/Kernel/Heap/Heap.h
+++ b/Kernel/Heap/Heap.h
@@ -14,6 +14,11 @@
namespace Kernel {
+enum class CallerWillInitializeMemory {
+ No,
+ Yes,
+};
+
template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0>
class Heap {
AK_MAKE_NONCOPYABLE(Heap);
@@ -63,7 +68,7 @@ public:
return needed_chunks * CHUNK_SIZE + (needed_chunks + 7) / 8;
}
- void* allocate(size_t size)
+ void* allocate(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
{
// We need space for the AllocationHeader at the head of the block.
size_t real_size = size + sizeof(AllocationHeader);
@@ -92,8 +97,10 @@ public:
m_bitmap.set_range_and_verify_that_all_bits_flip(first_chunk.value(), chunks_needed, true);
m_allocated_chunks += chunks_needed;
- if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
- __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
+ if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
+ if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
+ __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
+ }
}
return ptr;
}
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index e158ca6fc5..3258e83d89 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -123,7 +123,7 @@ public:
size_t slab_size() const { return m_slab_size; }
- void* allocate()
+ void* allocate(CallerWillInitializeMemory caller_will_initialize_memory)
{
if (m_usable_blocks.is_empty()) {
// FIXME: This allocation wastes `block_size` bytes due to the implementation of kmalloc_aligned().
@@ -141,7 +141,9 @@ public:
if (block->is_full())
m_full_blocks.append(*block);
- memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
+ if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
+ memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
+ }
return ptr;
}
@@ -220,17 +222,17 @@ struct KmallocGlobalData {
subheaps.append(*subheap);
}
- void* allocate(size_t size)
+ void* allocate(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
{
VERIFY(!expansion_in_progress);
for (auto& slabheap : slabheaps) {
if (size <= slabheap.slab_size())
- return slabheap.allocate();
+ return slabheap.allocate(caller_will_initialize_memory);
}
for (auto& subheap : subheaps) {
- if (auto* ptr = subheap.allocator.allocate(size))
+ if (auto* ptr = subheap.allocator.allocate(size, caller_will_initialize_memory))
return ptr;
}
@@ -247,14 +249,14 @@ struct KmallocGlobalData {
}
}
if (did_purge)
- return allocate(size);
+ return allocate(size, caller_will_initialize_memory);
}
if (!try_expand(size)) {
PANIC("OOM when trying to expand kmalloc heap.");
}
- return allocate(size);
+ return allocate(size, caller_will_initialize_memory);
}
void deallocate(void* ptr, size_t size)
@@ -419,7 +421,7 @@ UNMAP_AFTER_INIT void kmalloc_init()
s_lock.initialize();
}
-void* kmalloc(size_t size)
+static void* kmalloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
{
// Catch bad callers allocating under spinlock.
if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
@@ -434,7 +436,7 @@ void* kmalloc(size_t size)
Kernel::dump_backtrace();
}
- void* ptr = g_kmalloc_global->allocate(size);
+ void* ptr = g_kmalloc_global->allocate(size, caller_will_initialize_memory);
Thread* current_thread = Thread::current();
if (!current_thread)
@@ -449,13 +451,17 @@ void* kmalloc(size_t size)
return ptr;
}
+void* kmalloc(size_t size)
+{
+ return kmalloc_impl(size, CallerWillInitializeMemory::No);
+}
+
void* kcalloc(size_t count, size_t size)
{
if (Checked<size_t>::multiplication_would_overflow(count, size))
return nullptr;
size_t new_size = count * size;
- auto* ptr = kmalloc(new_size);
- // FIXME: Avoid redundantly scrubbing the memory in kmalloc()
+ auto* ptr = kmalloc_impl(new_size, CallerWillInitializeMemory::Yes);
if (ptr)
memset(ptr, 0, new_size);
return ptr;
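Note that the public API is unchanged by this patch: kmalloc(size_t) now simply forwards to kmalloc_impl() with CallerWillInitializeMemory::No, so ordinary kmalloc() callers still receive scrubbed memory wherever the heap is configured to scrub, while kcalloc() is the only caller that opts out, because its memset(ptr, 0, new_size) fully initializes the block anyway. A hypothetical caller's view (assuming the usual Kernel/Heap/kmalloc.h interface) therefore stays exactly as before:

auto* raw = kmalloc(64);                 // scrubbed on allocation (when scrubbing is enabled)
auto* zeroed = kcalloc(16, sizeof(u32)); // zero-filled once, no redundant scrub pass
kfree_sized(raw, 64);
kfree_sized(zeroed, 16 * sizeof(u32));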