author | Andreas Kling <kling@serenityos.org> | 2020-05-16 10:37:31 +0200
committer | Andreas Kling <kling@serenityos.org> | 2020-05-16 10:55:54 +0200
commit | ca4f714d68bae771fd7bd118254b8dfd46c9140a (patch)
tree | ca93a32e49b2e53fae6a62edfde580c621df130c
parent | 154a6e69a4b9f90609161da7387ae0217cf7f8aa (diff)
download | serenity-ca4f714d68bae771fd7bd118254b8dfd46c9140a.zip
Kernel: Use consistent names for kmalloc globals and remove volatile
-rw-r--r-- | Kernel/FileSystem/ProcFS.cpp | 10
-rw-r--r-- | Kernel/Heap/kmalloc.cpp | 31
-rw-r--r-- | Kernel/Heap/kmalloc.h | 11
3 files changed, 25 insertions, 27 deletions
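For orientation before the diff below, here is a minimal standalone sketch (not SerenityOS code; only the `g_kmalloc_*` identifiers are taken from the patch) of the bookkeeping pattern the rename converges on: plain, non-volatile `size_t` globals with `g_`-prefixed, byte-oriented names, adjusted on the allocation and free paths.

```cpp
// Illustrative only -- mirrors the counter updates, not the real allocator.
#include <cstddef>

static constexpr size_t POOL_SIZE = 3 * 1048576; // hypothetical pool size

size_t g_kmalloc_bytes_allocated = 0;
size_t g_kmalloc_bytes_free = POOL_SIZE;
size_t g_kmalloc_call_count = 0;
size_t g_kfree_call_count = 0;

void* toy_kmalloc(size_t size)
{
    ++g_kmalloc_call_count;
    g_kmalloc_bytes_allocated += size;
    g_kmalloc_bytes_free -= size;
    return nullptr; // the real allocator hands back a chunk from its pool
}

void toy_kfree(size_t size)
{
    ++g_kfree_call_count;
    g_kmalloc_bytes_allocated -= size;
    g_kmalloc_bytes_free += size;
}
```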
diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index 1e7e15d4d8..0204441842 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -823,15 +823,15 @@ Optional<KBuffer> procfs$memstat(InodeIdentifier)
     InterruptDisabler disabler;
     KBufferBuilder builder;
     JsonObjectSerializer<KBufferBuilder> json { builder };
-    json.add("kmalloc_allocated", (u32)sum_alloc);
-    json.add("kmalloc_available", (u32)sum_free);
-    json.add("kmalloc_eternal_allocated", (u32)kmalloc_sum_eternal);
+    json.add("kmalloc_allocated", (u32)g_kmalloc_bytes_allocated);
+    json.add("kmalloc_available", (u32)g_kmalloc_bytes_free);
+    json.add("kmalloc_eternal_allocated", (u32)g_kmalloc_bytes_eternal);
     json.add("user_physical_allocated", MM.user_physical_pages_used());
     json.add("user_physical_available", MM.user_physical_pages() - MM.user_physical_pages_used());
     json.add("super_physical_allocated", MM.super_physical_pages_used());
     json.add("super_physical_available", MM.super_physical_pages() - MM.super_physical_pages_used());
-    json.add("kmalloc_call_count", g_kmalloc_call_count);
-    json.add("kfree_call_count", g_kfree_call_count);
+    json.add("kmalloc_call_count", (u32)g_kmalloc_call_count);
+    json.add("kfree_call_count", (u32)g_kfree_call_count);
     slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) {
         auto prefix = String::format("slab_%zu", slab_size);
         json.add(String::format("%s_num_allocated", prefix.characters()), (u32)num_allocated);
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 2cf707bf4a..53465cd4dd 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -56,12 +56,11 @@ struct AllocationHeader {
 
 static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
 
-volatile size_t sum_alloc = 0;
-volatile size_t sum_free = POOL_SIZE;
-volatile size_t kmalloc_sum_eternal = 0;
-
-u32 g_kmalloc_call_count;
-u32 g_kfree_call_count;
+size_t g_kmalloc_bytes_allocated = 0;
+size_t g_kmalloc_bytes_free = POOL_SIZE;
+size_t g_kmalloc_bytes_eternal = 0;
+size_t g_kmalloc_call_count;
+size_t g_kfree_call_count;
 bool g_dump_kmalloc_stacks;
 
 static u8* s_next_eternal_ptr;
@@ -72,9 +71,9 @@ void kmalloc_init()
     memset(&alloc_map, 0, sizeof(alloc_map));
     memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
 
-    kmalloc_sum_eternal = 0;
-    sum_alloc = 0;
-    sum_free = POOL_SIZE;
+    g_kmalloc_bytes_eternal = 0;
+    g_kmalloc_bytes_allocated = 0;
+    g_kmalloc_bytes_free = POOL_SIZE;
 
     s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
     s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
@@ -85,7 +84,7 @@ void* kmalloc_eternal(size_t size)
     void* ptr = s_next_eternal_ptr;
     s_next_eternal_ptr += size;
     ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
-    kmalloc_sum_eternal += size;
+    g_kmalloc_bytes_eternal += size;
     return ptr;
 }
 
@@ -120,8 +119,8 @@ inline void* kmalloc_allocate(size_t first_chunk, size_t chunks_needed)
     Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
     bitmap_wrapper.set_range(first_chunk, chunks_needed, true);
 
-    sum_alloc += a->allocation_size_in_chunks * CHUNK_SIZE;
-    sum_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_allocated += a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
 #ifdef SANITIZE_KMALLOC
     memset(ptr, KMALLOC_SCRUB_BYTE, (a->allocation_size_in_chunks * CHUNK_SIZE) - sizeof(AllocationHeader));
 #endif
@@ -141,9 +140,9 @@ void* kmalloc_impl(size_t size)
     // We need space for the AllocationHeader at the head of the block.
     size_t real_size = size + sizeof(AllocationHeader);
 
-    if (sum_free < real_size) {
+    if (g_kmalloc_bytes_free < real_size) {
         Kernel::dump_backtrace();
-        klog() << "kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=" << sum_free << ", real_size=" << real_size;
+        klog() << "kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=" << g_kmalloc_bytes_free << ", real_size=" << real_size;
         Kernel::hang();
     }
 
@@ -183,8 +182,8 @@ void kfree(void* ptr)
     Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
     bitmap_wrapper.set_range(start, a->allocation_size_in_chunks, false);
 
-    sum_alloc -= a->allocation_size_in_chunks * CHUNK_SIZE;
-    sum_free += a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_allocated -= a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_free += a->allocation_size_in_chunks * CHUNK_SIZE;
 
 #ifdef SANITIZE_KMALLOC
     memset(a, KFREE_SCRUB_BYTE, a->allocation_size_in_chunks * CHUNK_SIZE);
diff --git a/Kernel/Heap/kmalloc.h b/Kernel/Heap/kmalloc.h
index c895f6578e..ee6e68f8da 100644
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -42,12 +42,11 @@ void* krealloc(void*, size_t);
 void kfree(void*);
 void kfree_aligned(void*);
 
-extern volatile size_t sum_alloc;
-extern volatile size_t sum_free;
-extern volatile size_t kmalloc_sum_eternal;
-extern volatile size_t kmalloc_sum_page_aligned;
-extern u32 g_kmalloc_call_count;
-extern u32 g_kfree_call_count;
+extern size_t g_kmalloc_bytes_allocated;
+extern size_t g_kmalloc_bytes_free;
+extern size_t g_kmalloc_bytes_eternal;
+extern size_t g_kmalloc_call_count;
+extern size_t g_kfree_call_count;
 extern bool g_dump_kmalloc_stacks;
 
 inline void* operator new(size_t, void* p) { return p; }
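Note that the JSON keys emitted by procfs$memstat ("kmalloc_allocated", "kmalloc_available", "kmalloc_eternal_allocated", ...) are unchanged by this patch; only the kernel-internal identifiers backing them were renamed. As a rough sketch, a hypothetical consumer (not from the SerenityOS tree) that keys off those strings would keep working unchanged:

```cpp
// Hypothetical /proc/memstat reader -- illustrative only; a real consumer
// would use a proper JSON parser instead of string searching.
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

int main()
{
    std::ifstream memstat("/proc/memstat");
    std::string json((std::istreambuf_iterator<char>(memstat)),
                     std::istreambuf_iterator<char>());

    const std::string key = "\"kmalloc_allocated\":";
    auto start = json.find(key);
    if (start != std::string::npos) {
        start += key.size();
        auto end = json.find(',', start);
        std::cout << "kmalloc_allocated = " << json.substr(start, end - start) << '\n';
    }
    return 0;
}
```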