summary refs log tree commit diff
path: root/Kernel/Heap
diff options
context:
space:
mode:
author	Andreas Kling <kling@serenityos.org>	2021-07-11 14:03:53 +0200
committer	Andreas Kling <kling@serenityos.org>	2021-07-11 14:14:51 +0200
commit	25e850ebb1ca4c1326fda8b8ee36251c5d91ceaf (patch)
tree	40bc52ee8c1f15679bd95c9f7d31fa2f7f4dccbc /Kernel/Heap
parent	966880eb45330bcdfbf4e0d74ad6e11b4782716c (diff)
download	serenity-25e850ebb1ca4c1326fda8b8ee36251c5d91ceaf.zip
Kernel: Remove krealloc()
This was only used by a single class (AK::ByteBuffer) in the kernel and not in an OOM-safe way. Now that ByteBuffer no longer uses it, there's no need for the kernel heap to burden itself with supporting this.
Diffstat (limited to 'Kernel/Heap')
-rw-r--r--	Kernel/Heap/Heap.h	39
-rw-r--r--	Kernel/Heap/kmalloc.cpp	7
-rw-r--r--	Kernel/Heap/kmalloc.h	1
3 files changed, 0 insertions, 47 deletions
diff --git a/Kernel/Heap/Heap.h b/Kernel/Heap/Heap.h
index 03d9d0f543..03f38e8c42 100644
--- a/Kernel/Heap/Heap.h
+++ b/Kernel/Heap/Heap.h
@@ -118,34 +118,6 @@ public:
}
}
- template<typename MainHeap>
- void* reallocate(void* ptr, size_t new_size, MainHeap& h)
- {
- if (!ptr)
- return h.allocate(new_size);
-
- auto* a = allocation_header(ptr);
- VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
- VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
-
- size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE - sizeof(AllocationHeader);
-
- if (old_size == new_size)
- return ptr;
-
- auto* new_ptr = h.allocate(new_size);
- if (new_ptr) {
- __builtin_memcpy(new_ptr, ptr, min(old_size, new_size));
- deallocate(ptr);
- }
- return new_ptr;
- }
-
- void* reallocate(void* ptr, size_t new_size)
- {
- return reallocate(ptr, new_size, *this);
- }
-
bool contains(const void* ptr) const
{
const auto* a = allocation_header(ptr);
@@ -319,17 +291,6 @@ public:
VERIFY_NOT_REACHED();
}
- void* reallocate(void* ptr, size_t new_size)
- {
- if (!ptr)
- return allocate(new_size);
- for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
- if (subheap->heap.contains(ptr))
- return subheap->heap.reallocate(ptr, new_size, *this);
- }
- VERIFY_NOT_REACHED();
- }
-
HeapType& add_subheap(void* memory, size_t memory_size)
{
VERIFY(memory_size > sizeof(SubHeap));
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index cb6330e4c0..d0ccda22b2 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -298,13 +298,6 @@ void kfree(void* ptr)
--g_nested_kfree_calls;
}
-void* krealloc(void* ptr, size_t new_size)
-{
- kmalloc_verify_nospinlock_held();
- ScopedSpinLock lock(s_lock);
- return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
-}
-
size_t kmalloc_good_size(size_t size)
{
return size;
diff --git a/Kernel/Heap/kmalloc.h b/Kernel/Heap/kmalloc.h
index 11fa984011..72fdb2e3b7 100644
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -40,7 +40,6 @@ void kmalloc_init();
[[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_impl(size_t);
[[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_eternal(size_t);
-void* krealloc(void*, size_t);
void kfree(void*);
void kfree_sized(void*, size_t);