From 9c384756088658e02ee6fba10030efcce1dc876b Mon Sep 17 00:00:00 2001
From: Brian Gianforcaro
Date: Fri, 14 May 2021 04:09:06 -0700
Subject: Kernel: Add the ability to verify we don't kmalloc under spinlock.

Ideally we would never allocate under a spinlock, as doing so has many
performance pitfalls and potential functionality (deadlock) pitfalls.
We violate that rule in many places today, so we need a tool to track
them all down and fix them.

This change introduces a new macro option named
`KMALLOC_VERIFY_NO_SPINLOCK_HELD` which can catch these situations at
runtime via an assertion.
---
 Kernel/Debug.h.in       |  4 ++++
 Kernel/Heap/kmalloc.cpp | 13 +++++++++++++
 2 files changed, 17 insertions(+)

(limited to 'Kernel')

diff --git a/Kernel/Debug.h.in b/Kernel/Debug.h.in
index 3ef0ad037b..0db094ad4c 100644
--- a/Kernel/Debug.h.in
+++ b/Kernel/Debug.h.in
@@ -134,6 +134,10 @@
 #cmakedefine01 KMALLOC_DEBUG
 #endif
 
+#ifndef KMALLOC_VERIFY_NO_SPINLOCK_HELD
+#cmakedefine01 KMALLOC_VERIFY_NO_SPINLOCK_HELD
+#endif
+
 #ifndef LOCAL_SOCKET_DEBUG
 #cmakedefine01 LOCAL_SOCKET_DEBUG
 #endif
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index ee9e7411eb..1e96825f87 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -204,6 +204,14 @@ void kmalloc_enable_expand()
     g_kmalloc_global->allocate_backup_memory();
 }
 
+static inline void kmalloc_verify_nospinlock_held()
+{
+    // Catch bad callers allocating under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        VERIFY(!Processor::current().in_critical());
+    }
+}
+
 UNMAP_AFTER_INIT void kmalloc_init()
 {
     // Zero out heap since it's placed after end_of_kernel_bss.
@@ -219,6 +227,8 @@ UNMAP_AFTER_INIT void kmalloc_init()
 
 void* kmalloc_eternal(size_t size)
 {
+    kmalloc_verify_nospinlock_held();
+
     size = round_up_to_power_of_two(size, sizeof(void*));
 
     ScopedSpinLock lock(s_lock);
@@ -231,6 +241,7 @@ void* kmalloc_eternal(size_t size)
 
 void* kmalloc(size_t size)
 {
+    kmalloc_verify_nospinlock_held();
     ScopedSpinLock lock(s_lock);
     ++g_kmalloc_call_count;
 
@@ -252,6 +263,7 @@ void kfree(void* ptr)
     if (!ptr)
         return;
 
+    kmalloc_verify_nospinlock_held();
     ScopedSpinLock lock(s_lock);
     ++g_kfree_call_count;
 
@@ -260,6 +272,7 @@ void kfree(void* ptr)
 
 void* krealloc(void* ptr, size_t new_size)
 {
+    kmalloc_verify_nospinlock_held();
    ScopedSpinLock lock(s_lock);
    return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
 }
-- 
cgit v1.2.3
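
For illustration, a sketch (not part of the patch above) of the kind of
caller this check is designed to catch. The names `s_demo_lock` and
`allocate_under_lock()` are hypothetical; `SpinLock`, `ScopedSpinLock`,
`kmalloc()`, `kfree()`, and `Processor::current().in_critical()` are the
kernel APIs visible in the diff, and the include paths are assumed to match
the tree at the time of this commit. `ScopedSpinLock` enters a processor
critical section for its scope, so with `KMALLOC_VERIFY_NO_SPINLOCK_HELD`
enabled, the allocation below trips `VERIFY(!Processor::current().in_critical())`:

    #include <Kernel/Heap/kmalloc.h>
    #include <Kernel/SpinLock.h>

    static SpinLock<u8> s_demo_lock; // hypothetical lock, for illustration only

    void allocate_under_lock()
    {
        ScopedSpinLock lock(s_demo_lock); // enters a critical section
        // BAD: allocating while the spinlock is held. With
        // KMALLOC_VERIFY_NO_SPINLOCK_HELD enabled, this call asserts
        // inside kmalloc_verify_nospinlock_held().
        void* buffer = kmalloc(128);
        kfree(buffer); // kfree() performs the same check and would assert too
    } // lock released here; allocating after this point is fine

Since the option is plumbed through `#cmakedefine01` in Debug.h.in, enabling
it presumably amounts to setting the matching CMake cache variable (e.g.
`cmake -DKMALLOC_VERIFY_NO_SPINLOCK_HELD=ON`) and rebuilding the kernel.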