-rw-r--r--  Kernel/FileSystem/ProcFS.cpp |  14
-rw-r--r--  Kernel/Heap/Heap.h           | 363
-rw-r--r--  Kernel/Heap/kmalloc.cpp      | 247
-rw-r--r--  Kernel/Heap/kmalloc.h        |  37
-rw-r--r--  Kernel/Thread.cpp            |   2
-rw-r--r--  Kernel/VM/MemoryManager.cpp  |   9
-rw-r--r--  Kernel/VM/MemoryManager.h    |   1
7 files changed, 541 insertions, 132 deletions
diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index eaa648c669..b4e55238c4 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -797,17 +797,21 @@ static Optional<KBuffer> procfs$cpuinfo(InodeIdentifier)
 Optional<KBuffer> procfs$memstat(InodeIdentifier)
 {
     InterruptDisabler disabler;
+
+    kmalloc_stats stats;
+    get_kmalloc_stats(stats);
+
     KBufferBuilder builder;
     JsonObjectSerializer<KBufferBuilder> json { builder };
-    json.add("kmalloc_allocated", g_kmalloc_bytes_allocated);
-    json.add("kmalloc_available", g_kmalloc_bytes_free);
-    json.add("kmalloc_eternal_allocated", g_kmalloc_bytes_eternal);
+    json.add("kmalloc_allocated", stats.bytes_allocated);
+    json.add("kmalloc_available", stats.bytes_free);
+    json.add("kmalloc_eternal_allocated", stats.bytes_eternal);
     json.add("user_physical_allocated", MM.user_physical_pages_used());
     json.add("user_physical_available", MM.user_physical_pages() - MM.user_physical_pages_used());
     json.add("super_physical_allocated", MM.super_physical_pages_used());
     json.add("super_physical_available", MM.super_physical_pages() - MM.super_physical_pages_used());
-    json.add("kmalloc_call_count", g_kmalloc_call_count);
-    json.add("kfree_call_count", g_kfree_call_count);
+    json.add("kmalloc_call_count", stats.kmalloc_call_count);
+    json.add("kfree_call_count", stats.kfree_call_count);
     slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) {
         auto prefix = String::format("slab_%zu", slab_size);
         json.add(String::format("%s_num_allocated", prefix.characters()), num_allocated);
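Note (illustration only, not part of this patch): procfs$memstat now takes a single locked snapshot via get_kmalloc_stats() instead of reading several globals that can change between reads. A minimal sketch of what any other in-kernel consumer of the new API could look like; the field names come from the kmalloc_stats struct added in Kernel/Heap/kmalloc.h further down, while the helper name and log format here are invented:

    #include <Kernel/Heap/kmalloc.h>

    // Sketch: log one consistent snapshot of the allocator counters.
    static void log_kmalloc_usage()
    {
        kmalloc_stats stats;
        get_kmalloc_stats(stats);
        klog() << "kmalloc: " << stats.bytes_allocated << " bytes allocated, "
               << stats.bytes_free << " bytes free ("
               << stats.kmalloc_call_count << " kmalloc / "
               << stats.kfree_call_count << " kfree calls)";
    }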
diff --git a/Kernel/Heap/Heap.h b/Kernel/Heap/Heap.h
new file mode 100644
index 0000000000..959e678758
--- /dev/null
+++ b/Kernel/Heap/Heap.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2020, The SerenityOS developers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <AK/Bitmap.h>
+#include <AK/ScopeGuard.h>
+#include <AK/Vector.h>
+#include <AK/kmalloc.h>
+
+namespace Kernel {
+
+template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0>
+class Heap {
+    AK_MAKE_NONCOPYABLE(Heap);
+
+    struct AllocationHeader {
+        size_t allocation_size_in_chunks;
+        u8 data[0];
+    };
+
+    static size_t calculate_chunks(size_t memory_size)
+    {
+        return (sizeof(u8) * memory_size) / (sizeof(u8) * CHUNK_SIZE + 1);
+    }
+
+public:
+    Heap(u8* memory, size_t memory_size)
+        : m_total_chunks(calculate_chunks(memory_size))
+        , m_chunks(memory)
+        , m_bitmap(Bitmap::wrap(memory + m_total_chunks * CHUNK_SIZE, m_total_chunks))
+    {
+        // To keep the alignment of the memory passed in, place the bitmap
+        // at the end of the memory block.
+        ASSERT(m_total_chunks * CHUNK_SIZE + (m_total_chunks + 7) / 8 <= memory_size);
+    }
+    ~Heap()
+    {
+    }
+
+    static size_t calculate_memory_for_bytes(size_t bytes)
+    {
+        size_t needed_chunks = (sizeof(AllocationHeader) + bytes + CHUNK_SIZE - 1) / CHUNK_SIZE;
+        return needed_chunks * CHUNK_SIZE + (needed_chunks + 7) / 8;
+    }
+
+    void* allocate(size_t size)
+    {
+        // We need space for the AllocationHeader at the head of the block.
+        size_t real_size = size + sizeof(AllocationHeader);
+        size_t chunks_needed = (real_size + CHUNK_SIZE - 1) / CHUNK_SIZE;
+
+        if (chunks_needed > free_chunks())
+            return nullptr;
+
+        Optional<size_t> first_chunk;
+
+        // Choose the right policy for allocation.
+        constexpr u32 best_fit_threshold = 128;
+        if (chunks_needed < best_fit_threshold) {
+            first_chunk = m_bitmap.find_first_fit(chunks_needed);
+        } else {
+            first_chunk = m_bitmap.find_best_fit(chunks_needed);
+        }
+
+        if (!first_chunk.has_value())
+            return nullptr;
+
+        auto* a = (AllocationHeader*)(m_chunks + (first_chunk.value() * CHUNK_SIZE));
+        u8* ptr = a->data;
+        a->allocation_size_in_chunks = chunks_needed;
+
+        m_bitmap.set_range(first_chunk.value(), chunks_needed, true);
+
+        m_allocated_chunks += chunks_needed;
+        if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
+            __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
+        }
+        return ptr;
+    }
+
+    void deallocate(void* ptr)
+    {
+        if (!ptr)
+            return;
+        auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
+        ASSERT((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
+        ASSERT((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
+        FlatPtr start = ((FlatPtr)a - (FlatPtr)m_chunks) / CHUNK_SIZE;
+
+        m_bitmap.set_range(start, a->allocation_size_in_chunks, false);
+
+        ASSERT(m_allocated_chunks >= a->allocation_size_in_chunks);
+        m_allocated_chunks -= a->allocation_size_in_chunks;
+
+        if constexpr (HEAP_SCRUB_BYTE_FREE != 0) {
+            __builtin_memset(a, HEAP_SCRUB_BYTE_FREE, a->allocation_size_in_chunks * CHUNK_SIZE);
+        }
+    }
+
+    template<typename MainHeap>
+    void* reallocate(void* ptr, size_t new_size, MainHeap& h)
+    {
+        if (!ptr)
+            return h.allocate(new_size);
+
+        auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
+        ASSERT((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
+        ASSERT((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
+
+        size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;
+
+        if (old_size == new_size)
+            return ptr;
+
+        auto* new_ptr = h.allocate(new_size);
+        if (new_ptr)
+            __builtin_memcpy(new_ptr, ptr, min(old_size, new_size));
+
+        deallocate(ptr);
+        return new_ptr;
+    }
+
+    void* reallocate(void* ptr, size_t new_size)
+    {
+        return reallocate(ptr, new_size, *this);
+    }
+
+    bool contains(const void* ptr) const
+    {
+        const auto* a = (const AllocationHeader*)((((const u8*)ptr) - sizeof(AllocationHeader)));
+        if ((const u8*)a < m_chunks)
+            return false;
+        if ((const u8*)ptr >= m_chunks + m_total_chunks * CHUNK_SIZE)
+            return false;
+        return true;
+    }
+
+    u8* memory() const { return m_chunks; }
+
+    size_t total_chunks() const { return m_total_chunks; }
+    size_t total_bytes() const { return m_total_chunks * CHUNK_SIZE; }
+    size_t free_chunks() const { return m_total_chunks - m_allocated_chunks; };
+    size_t free_bytes() const { return free_chunks() * CHUNK_SIZE; }
+    size_t allocated_chunks() const { return m_allocated_chunks; }
+    size_t allocated_bytes() const { return m_allocated_chunks * CHUNK_SIZE; }
+
+private:
+    size_t m_total_chunks { 0 };
+    size_t m_allocated_chunks { 0 };
+    u8* m_chunks { nullptr };
+    Bitmap m_bitmap;
+};
+
+template<typename ExpandHeap>
+struct ExpandableHeapTraits {
+    static bool add_memory(ExpandHeap& expand, size_t allocation_request)
+    {
+        return expand.add_memory(allocation_request);
+    }
+
+    static bool remove_memory(ExpandHeap& expand, void* memory)
+    {
+        return expand.remove_memory(memory);
+    }
+};
+
+struct DefaultExpandHeap {
+    bool add_memory(size_t)
+    {
+        // Requires explicit implementation
+        return false;
+    }
+
+    bool remove_memory(void*)
+    {
+        return false;
+    }
+};
+
+template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0, typename ExpandHeap = DefaultExpandHeap>
+class ExpandableHeap {
+    AK_MAKE_NONCOPYABLE(ExpandableHeap);
+    AK_MAKE_NONMOVABLE(ExpandableHeap);
+
+public:
+    typedef ExpandHeap ExpandHeapType;
+    typedef Heap<CHUNK_SIZE, HEAP_SCRUB_BYTE_ALLOC, HEAP_SCRUB_BYTE_FREE> HeapType;
+
+    struct SubHeap {
+        HeapType heap;
+        SubHeap* next { nullptr };
+
+        template<typename... Args>
+        SubHeap(Args&&... args)
+            : heap(forward<Args>(args)...)
+        {
+        }
+    };
+
+    ExpandableHeap(u8* memory, size_t memory_size, const ExpandHeapType& expand = ExpandHeapType())
+        : m_heaps(memory, memory_size)
+        , m_expand(expand)
+    {
+    }
+    ~ExpandableHeap()
+    {
+        // We don't own the main heap, only remove memory that we added previously
+        SubHeap* next;
+        for (auto* heap = m_heaps.next; heap; heap = next) {
+            next = heap->next;
+
+            heap->~SubHeap();
+            ExpandableHeapTraits<ExpandHeap>::remove_memory(m_expand, (void*)heap);
+        }
+    }
+
+    static size_t calculate_memory_for_bytes(size_t bytes)
+    {
+        return sizeof(SubHeap) + HeapType::calculate_memory_for_bytes(bytes);
+    }
+
+    void* allocate(size_t size)
+    {
+        do {
+            for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
+                if (void* ptr = subheap->heap.allocate(size))
+                    return ptr;
+            }
+
+            // We need to loop because we won't know how much memory was added.
+            // Even though we make a best guess at how much memory needs to be
+            // added, there is no guarantee that enough will be available after
+            // adding it. This is especially true for the kmalloc heap, where
+            // adding memory requires several other objects to be allocated just
+            // to be able to expand the heap.
+        } while (ExpandableHeapTraits<ExpandHeap>::add_memory(m_expand, size));
+        return nullptr;
+    }
+
+    void deallocate(void* ptr)
+    {
+        if (!ptr)
+            return;
+        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
+            if (subheap->heap.contains(ptr)) {
+                subheap->heap.deallocate(ptr);
+                if (subheap->heap.allocated_chunks() == 0 && subheap != &m_heaps) {
+                    // Since remove_memory may free subheap, we need to save the
+                    // next pointer before calling it
+                    auto* next_subheap = subheap->next;
+                    if (ExpandableHeapTraits<ExpandHeap>::remove_memory(m_expand, subheap)) {
+                        auto* subheap2 = m_heaps.next;
+                        auto** subheap_link = &m_heaps.next;
+                        while (subheap2 != subheap) {
+                            subheap_link = &subheap2->next;
+                            subheap2 = subheap2->next;
+                        }
+                        *subheap_link = next_subheap;
+                    }
+                }
+                return;
+            }
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    void* reallocate(void* ptr, size_t new_size)
+    {
+        if (!ptr)
+            return allocate(new_size);
+        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
+            if (subheap->heap.contains(ptr))
+                return subheap->heap.reallocate(ptr, new_size, *this);
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    HeapType& add_subheap(void* memory, size_t memory_size)
+    {
+        ASSERT(memory_size > sizeof(SubHeap));
+
+        // Place the SubHeap structure at the beginning of the new memory block
+        memory_size -= sizeof(SubHeap);
+        SubHeap* new_heap = (SubHeap*)memory;
+        new (new_heap) SubHeap((u8*)(new_heap + 1), memory_size);
+
+        // Add the subheap to the list (but leave the main heap where it is)
+        SubHeap* next_heap = m_heaps.next;
+        SubHeap** next_heap_link = &m_heaps.next;
+        while (next_heap) {
+            if (new_heap->heap.memory() < next_heap->heap.memory())
+                break;
+            next_heap_link = &next_heap->next;
+            next_heap = next_heap->next;
+        }
+        new_heap->next = *next_heap_link;
+        *next_heap_link = new_heap;
+        return new_heap->heap;
+    }
+
+    bool contains(const void* ptr) const
+    {
+        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
+            if (subheap->heap.contains(ptr))
+                return true;
+        }
+        return false;
+    }
+
+    size_t total_chunks() const
+    {
+        size_t total = 0;
+        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
+            total += subheap->heap.total_chunks();
+        return total;
+    }
+    size_t total_bytes() const { return total_chunks() * CHUNK_SIZE; }
+    size_t free_chunks() const
+    {
+        size_t total = 0;
+        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
+            total += subheap->heap.free_chunks();
+        return total;
+    }
+    size_t free_bytes() const { return free_chunks() * CHUNK_SIZE; }
+    size_t allocated_chunks() const
+    {
+        size_t total = 0;
+        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
+            total += subheap->heap.allocated_chunks();
+        return total;
+    }
+    size_t allocated_bytes() const { return allocated_chunks() * CHUNK_SIZE; }
+
+private:
+    SubHeap m_heaps;
+    ExpandHeap m_expand;
+};
+
+}
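Note (illustration only, not part of this patch): the Heap template carves the buffer it is given into CHUNK_SIZE-byte chunks, prefixes every allocation with an AllocationHeader, and keeps the allocation bitmap at the tail end of the same buffer; ExpandableHeap chains several such heaps together and only asks its ExpandHeap functor for more memory once every subheap is full. A minimal usage sketch under assumed conditions — the backing buffer, the expander type, and the function name are invented for the example; the real kernel wiring follows in kmalloc.cpp below:

    #include <Kernel/Heap/Heap.h>

    // Hypothetical fixed backing store; chunks and bitmap both live inside it.
    static u8 s_storage[64 * 1024];

    // A do-nothing expander: allocation simply fails once s_storage is full.
    struct NullExpander {
        bool add_memory(size_t) { return false; }
        bool remove_memory(void*) { return false; }
    };

    // 32-byte chunks, no scrub bytes.
    static Kernel::ExpandableHeap<32, 0, 0, NullExpander> s_heap(s_storage, sizeof(s_storage));

    void heap_usage_sketch()
    {
        // One AllocationHeader plus 128 bytes, rounded up to whole chunks.
        void* p = s_heap.allocate(128);
        // May move the block; the old contents are copied into the new one.
        p = s_heap.reallocate(p, 4096);
        // Clears the corresponding bitmap range again.
        s_heap.deallocate(p);
    }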
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 8cf9f66812..be29e0f5aa 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -30,10 +30,12 @@
  */
 
 #include <AK/Assertions.h>
-#include <AK/Bitmap.h>
+#include <AK/NonnullOwnPtrVector.h>
 #include <AK/Optional.h>
+#include <AK/StringView.h>
 #include <AK/Types.h>
 #include <Kernel/Arch/i386/CPU.h>
+#include <Kernel/Heap/Heap.h>
 #include <Kernel/Heap/kmalloc.h>
 #include <Kernel/KSyms.h>
 #include <Kernel/Process.h>
@@ -44,30 +46,118 @@
 
 #define SANITIZE_KMALLOC
 
-struct AllocationHeader {
-    size_t allocation_size_in_chunks;
-    u8 data[0];
-};
-
 #define CHUNK_SIZE 32
-#define POOL_SIZE (3 * MiB)
+#define POOL_SIZE (2 * MiB)
 #define ETERNAL_RANGE_SIZE (2 * MiB)
 
+struct KmallocGlobalHeap {
+    struct ExpandGlobalHeap {
+        KmallocGlobalHeap& m_global_heap;
+
+        ExpandGlobalHeap(KmallocGlobalHeap& global_heap)
+            : m_global_heap(global_heap)
+        {
+        }
+
+        bool add_memory(size_t allocation_request)
+        {
+            if (!MemoryManager::is_initialized()) {
+                klog() << "kmalloc(): Cannot expand heap before MM is initialized!";
+                return false;
+            }
+            // At this point we have very little memory left. Any attempt to
+            // kmalloc() could fail, so we can't reliably allocate even a new
+            // region of memory here. This is why we keep a backup region
+            // around, which we now pull in to grow the heap.
+            auto region = move(m_global_heap.m_backup_memory);
+            if (!region) {
+                klog() << "kmalloc(): Cannot expand heap: no backup memory";
+                return false;
+            }
+
+            klog() << "kmalloc(): Adding memory to heap at " << region->vaddr() << ", bytes: " << region->size();
+
+            auto& subheap = m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
+            m_global_heap.m_subheap_memory.append(region.release_nonnull());
+
+            // Since we pulled in our backup heap, make sure we allocate another
+            // backup heap before returning. Otherwise we potentially lose
+            // the ability to expand the heap next time we get called.
+            ScopeGuard guard([&]() {
+                m_global_heap.allocate_backup_memory();
+            });
+
+            // Now that we added our backup memory, check if the backup heap
+            // was big enough to likely satisfy the request
+            if (subheap.free_bytes() < allocation_request) {
+                // Looks like we probably need more
+                size_t memory_size = max(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request), (size_t)(1 * MiB));
+                region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write);
+                if (region) {
+                    klog() << "kmalloc(): Adding even more memory to heap at " << region->vaddr() << ", bytes: " << region->size();
+
+                    m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
+                    m_global_heap.m_subheap_memory.append(region.release_nonnull());
+                } else {
+                    klog() << "kmalloc(): Could not expand heap to satisfy allocation of " << allocation_request << " bytes";
+                    return false;
+                }
+            }
+            return true;
+        }
+
+        bool remove_memory(void* memory)
+        {
+            // This is actually relatively unlikely to happen, because it requires all
+            // allocated memory in a subheap to be freed; only then can the subheap be removed...
+            for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
+                if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
+                    auto region = m_global_heap.m_subheap_memory.take(i);
+                    klog() << "kmalloc(): Removing memory from heap at " << region->vaddr() << ", bytes: " << region->size();
+                    return true;
+                }
+            }
+
+            klog() << "kmalloc(): Cannot remove memory from heap: " << VirtualAddress(memory);
+            return false;
+        }
+    };
+    typedef ExpandableHeap<CHUNK_SIZE, KMALLOC_SCRUB_BYTE, KFREE_SCRUB_BYTE, ExpandGlobalHeap> HeapType;
+
+    HeapType m_heap;
+    NonnullOwnPtrVector<Region> m_subheap_memory;
+    OwnPtr<Region> m_backup_memory;
+
+    KmallocGlobalHeap(u8* memory, size_t memory_size)
+        : m_heap(memory, memory_size, ExpandGlobalHeap(*this))
+    {
+    }
+    void allocate_backup_memory()
+    {
+        if (m_backup_memory)
+            return;
+        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write);
+    }
+
+    size_t backup_memory_bytes() const
+    {
+        return m_backup_memory ? m_backup_memory->size() : 0;
+    }
+};
+
+static KmallocGlobalHeap* g_kmalloc_global;
+
 // We need to make sure to not stomp on global variables or other parts
 // of the kernel image!
 extern u32 end_of_kernel_image;
 u8* const kmalloc_start = (u8*)PAGE_ROUND_UP(&end_of_kernel_image);
-u8* const kmalloc_end = kmalloc_start + (ETERNAL_RANGE_SIZE + POOL_SIZE);
-#define ETERNAL_BASE kmalloc_start
+u8* const kmalloc_end = kmalloc_start + (ETERNAL_RANGE_SIZE + POOL_SIZE) + sizeof(KmallocGlobalHeap);
+#define ETERNAL_BASE (kmalloc_start + sizeof(KmallocGlobalHeap))
 #define KMALLOC_BASE (ETERNAL_BASE + ETERNAL_RANGE_SIZE)
 
-static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
-
-size_t g_kmalloc_bytes_allocated = 0;
-size_t g_kmalloc_bytes_free = POOL_SIZE;
-size_t g_kmalloc_bytes_eternal = 0;
-size_t g_kmalloc_call_count;
-size_t g_kfree_call_count;
+static size_t g_kmalloc_bytes_eternal = 0;
+static size_t g_kmalloc_call_count;
+static size_t g_kfree_call_count;
 bool g_dump_kmalloc_stacks;
 
 static u8* s_next_eternal_ptr;
@@ -75,15 +165,17 @@ static u8* s_end_of_eternal_range;
 
 static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
 
+void kmalloc_enable_expand()
+{
+    g_kmalloc_global->allocate_backup_memory();
+}
+
 void kmalloc_init()
 {
-    memset(&alloc_map, 0, sizeof(alloc_map));
     memset((void*)KMALLOC_BASE, 0, POOL_SIZE);
-    s_lock.initialize();
+    g_kmalloc_global = new (kmalloc_start) KmallocGlobalHeap(KMALLOC_BASE, POOL_SIZE); // Place heap at kmalloc_start
 
-    g_kmalloc_bytes_eternal = 0;
-    g_kmalloc_bytes_allocated = 0;
-    g_kmalloc_bytes_free = POOL_SIZE;
+    s_lock.initialize();
 
     s_next_eternal_ptr = (u8*)ETERNAL_BASE;
     s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
@@ -99,45 +191,6 @@ void* kmalloc_eternal(size_t size)
     return ptr;
 }
 
-void* kmalloc_aligned(size_t size, size_t alignment)
-{
-    void* ptr = kmalloc(size + alignment + sizeof(void*));
-    size_t max_addr = (size_t)ptr + alignment;
-    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
-    ((void**)aligned_ptr)[-1] = ptr;
-    return aligned_ptr;
-}
-
-void kfree_aligned(void* ptr)
-{
-    kfree(((void**)ptr)[-1]);
-}
-
-void* kmalloc_page_aligned(size_t size)
-{
-    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
-    size_t d = (size_t)ptr;
-    ASSERT((d & PAGE_MASK) == d);
-    return ptr;
-}
-
-inline void* kmalloc_allocate(size_t first_chunk, size_t chunks_needed)
-{
-    auto* a = (AllocationHeader*)(KMALLOC_BASE + (first_chunk * CHUNK_SIZE));
-    u8* ptr = a->data;
-    a->allocation_size_in_chunks = chunks_needed;
-
-    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
-    bitmap_wrapper.set_range(first_chunk, chunks_needed, true);
-
-    g_kmalloc_bytes_allocated += a->allocation_size_in_chunks * CHUNK_SIZE;
-    g_kmalloc_bytes_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
-#ifdef SANITIZE_KMALLOC
-    memset(ptr, KMALLOC_SCRUB_BYTE, (a->allocation_size_in_chunks * CHUNK_SIZE) - sizeof(AllocationHeader));
-#endif
-    return ptr;
-}
-
 void* kmalloc_impl(size_t size)
 {
     ScopedSpinLock lock(s_lock);
@@ -148,53 +201,14 @@ void* kmalloc_impl(size_t size)
         Kernel::dump_backtrace();
     }
 
-    // We need space for the AllocationHeader at the head of the block.
-    size_t real_size = size + sizeof(AllocationHeader);
-
-    if (g_kmalloc_bytes_free < real_size) {
-        Kernel::dump_backtrace();
-        klog() << "kmalloc(): PANIC! Out of memory\nsum_free=" << g_kmalloc_bytes_free << ", real_size=" << real_size;
-        Processor::halt();
-    }
-
-    size_t chunks_needed = (real_size + CHUNK_SIZE - 1) / CHUNK_SIZE;
-
-    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
-    Optional<size_t> first_chunk;
-
-    // Choose the right politic for allocation.
-    constexpr u32 best_fit_threshold = 128;
-    if (chunks_needed < best_fit_threshold) {
-        first_chunk = bitmap_wrapper.find_first_fit(chunks_needed);
-    } else {
-        first_chunk = bitmap_wrapper.find_best_fit(chunks_needed);
-    }
-
-    if (!first_chunk.has_value()) {
+    void* ptr = g_kmalloc_global->m_heap.allocate(size);
+    if (!ptr) {
         klog() << "kmalloc(): PANIC! Out of memory (no suitable block for size " << size << ")";
         Kernel::dump_backtrace();
         Processor::halt();
     }
 
-    return kmalloc_allocate(first_chunk.value(), chunks_needed);
-}
-
-static inline void kfree_impl(void* ptr)
-{
-    ++g_kfree_call_count;
-
-    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
-    FlatPtr start = ((FlatPtr)a - (FlatPtr)KMALLOC_BASE) / CHUNK_SIZE;
-
-    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
-    bitmap_wrapper.set_range(start, a->allocation_size_in_chunks, false);
-
-    g_kmalloc_bytes_allocated -= a->allocation_size_in_chunks * CHUNK_SIZE;
-    g_kmalloc_bytes_free += a->allocation_size_in_chunks * CHUNK_SIZE;
-
-#ifdef SANITIZE_KMALLOC
-    memset(a, KFREE_SCRUB_BYTE, a->allocation_size_in_chunks * CHUNK_SIZE);
-#endif
+    return ptr;
 }
 
 void kfree(void* ptr)
@@ -203,26 +217,15 @@ void kfree(void* ptr)
         return;
 
     ScopedSpinLock lock(s_lock);
-    kfree_impl(ptr);
+    ++g_kfree_call_count;
+
+    g_kmalloc_global->m_heap.deallocate(ptr);
 }
 
 void* krealloc(void* ptr, size_t new_size)
 {
-    if (!ptr)
-        return kmalloc(new_size);
-
     ScopedSpinLock lock(s_lock);
-
-    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
-    size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;
-
-    if (old_size == new_size)
-        return ptr;
-
-    auto* new_ptr = kmalloc(new_size);
-    memcpy(new_ptr, ptr, min(old_size, new_size));
-    kfree_impl(ptr);
-    return new_ptr;
+    return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
 }
 
 void* operator new(size_t size)
@@ -234,3 +237,13 @@ void* operator new[](size_t size)
 {
     return kmalloc(size);
 }
+
+void get_kmalloc_stats(kmalloc_stats& stats)
+{
+    ScopedSpinLock lock(s_lock);
+    stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
+    stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
+    stats.bytes_eternal = g_kmalloc_bytes_eternal;
+    stats.kmalloc_call_count = g_kmalloc_call_count;
+    stats.kfree_call_count = g_kfree_call_count;
+}
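Note (illustration only, not part of this patch): with POOL_SIZE shrunk from 3 MiB to 2 MiB and CHUNK_SIZE still 32 bytes, the main pool's geometry follows directly from the formulas in Heap.h. calculate_chunks() conservatively reserves a whole byte of bookkeeping per chunk even though the bitmap only needs one bit, so the usable pool ends up slightly smaller than the raw region. A stand-alone arithmetic check, written in plain C++ with locally defined constants that mirror the ones above:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t MiB = 1024 * 1024;
        const size_t pool_size = 2 * MiB; // POOL_SIZE in kmalloc.cpp
        const size_t chunk_size = 32;     // CHUNK_SIZE

        // Heap::calculate_chunks(): memory_size / (CHUNK_SIZE + 1).
        size_t total_chunks = pool_size / (chunk_size + 1);
        // The bitmap needs one bit per chunk, rounded up to whole bytes.
        size_t bitmap_bytes = (total_chunks + 7) / 8;

        // 63550 chunks -> roughly 1.94 MiB usable and about 7.8 KiB of bitmap.
        printf("chunks=%zu usable=%zu bytes bitmap=%zu bytes\n",
               total_chunks, total_chunks * chunk_size, bitmap_bytes);
        return 0;
    }

The constructor's assertion (chunk area plus bitmap must fit in memory_size) holds with plenty of slack because of that conservative divisor.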
diff --git a/Kernel/Heap/kmalloc.h b/Kernel/Heap/kmalloc.h
index 75f0fab642..ced23e7146 100644
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -36,17 +36,19 @@ void kmalloc_init();
 [[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_impl(size_t);
 [[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_eternal(size_t);
-[[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_page_aligned(size_t);
-[[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_aligned(size_t, size_t alignment);
+
 void* krealloc(void*, size_t);
 void kfree(void*);
-void kfree_aligned(void*);
 
-extern size_t g_kmalloc_bytes_allocated;
-extern size_t g_kmalloc_bytes_free;
-extern size_t g_kmalloc_bytes_eternal;
-extern size_t g_kmalloc_call_count;
-extern size_t g_kfree_call_count;
+struct kmalloc_stats {
+    size_t bytes_allocated;
+    size_t bytes_free;
+    size_t bytes_eternal;
+    size_t kmalloc_call_count;
+    size_t kfree_call_count;
+};
+void get_kmalloc_stats(kmalloc_stats&);
+
 extern bool g_dump_kmalloc_stacks;
 
 inline void* operator new(size_t, void* p) { return p; }
@@ -62,5 +64,24 @@ inline void* operator new[](size_t, void* p) { return p; }
     return kmalloc_impl(size);
 }
 
+template<size_t ALIGNMENT>
+[[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] inline void* kmalloc_aligned(size_t size)
+{
+    static_assert(ALIGNMENT > 1);
+    static_assert(ALIGNMENT < 255);
+    void* ptr = kmalloc(size + ALIGNMENT + sizeof(u8));
+    size_t max_addr = (size_t)ptr + ALIGNMENT;
+    void* aligned_ptr = (void*)(max_addr - (max_addr % ALIGNMENT));
+    ((u8*)aligned_ptr)[-1] = (u8)((u8*)aligned_ptr - (u8*)ptr);
+    return aligned_ptr;
+}
+
+inline void kfree_aligned(void* ptr)
+{
+    kfree((u8*)ptr - ((u8*)ptr)[-1]);
+}
+
+void kmalloc_enable_expand();
+
 extern u8* const kmalloc_start;
 extern u8* const kmalloc_end;
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index b8ab891c3c..7f7cc24065 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -60,7 +60,7 @@ Thread::Thread(NonnullRefPtr<Process> process)
     dbg() << "Created new thread " << m_process->name() << "(" << m_process->pid().value() << ":" << m_tid.value() << ")";
 #endif
     set_default_signal_dispositions();
-    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
+    m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
     reset_fpu_state();
     memset(&m_tss, 0, sizeof(m_tss));
     m_tss.iomapbase = sizeof(TSS32);
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index fc8f8da12a..15fdc65b75 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -65,6 +65,11 @@ MemoryManager& MM
     return *s_the;
 }
 
+bool MemoryManager::is_initialized()
+{
+    return s_the != nullptr;
+}
+
 MemoryManager::MemoryManager()
 {
     ScopedSpinLock lock(s_mm_lock);
@@ -282,8 +287,10 @@ void MemoryManager::initialize(u32 cpu)
 #endif
     Processor::current().set_mm_data(*mm_data);
 
-    if (cpu == 0)
+    if (cpu == 0) {
         s_the = new MemoryManager;
+        kmalloc_enable_expand();
+    }
 }
 
 Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index a66bb551d4..1da2c9edee 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -86,6 +86,7 @@ class MemoryManager {
 
 public:
     static MemoryManager& the();
+    static bool is_initialized();
 
     static void initialize(u32 cpu);
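Note (illustration only, not part of this patch): the new kmalloc_aligned<ALIGNMENT>() over-allocates by ALIGNMENT + 1 bytes and stashes the distance back to the real allocation in the single byte just before the pointer it hands out; that is why ALIGNMENT must fit in a u8 and why kfree_aligned() needs no other metadata. The same technique, sketched as self-contained user-space C++ on top of malloc/free (the helper names are invented):

    #include <cstdint>
    #include <cstdlib>

    template<size_t ALIGNMENT>
    void* aligned_alloc_sketch(size_t size)
    {
        static_assert(ALIGNMENT > 1 && ALIGNMENT < 255, "offset must fit in one byte");
        // Over-allocate so there is always at least one spare byte before the
        // aligned address and enough room after it for 'size' bytes.
        uint8_t* ptr = static_cast<uint8_t*>(malloc(size + ALIGNMENT + sizeof(uint8_t)));
        if (!ptr)
            return nullptr;
        size_t max_addr = reinterpret_cast<size_t>(ptr) + ALIGNMENT;
        uint8_t* aligned_ptr = reinterpret_cast<uint8_t*>(max_addr - (max_addr % ALIGNMENT));
        aligned_ptr[-1] = static_cast<uint8_t>(aligned_ptr - ptr); // distance back to the malloc()'d block
        return aligned_ptr;
    }

    inline void aligned_free_sketch(void* ptr)
    {
        if (!ptr)
            return;
        uint8_t* p = static_cast<uint8_t*>(ptr);
        free(p - p[-1]); // walk back by the stored offset
    }

The kernel version skips the null checks because kmalloc() panics on failure instead of returning nullptr.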