summaryrefslogtreecommitdiff
path: root/Kernel
diff options
context:
space:
mode:
author: kleines Filmröllchen <malu.bertsch@gmail.com> 2022-01-08 23:36:13 +0100
committer: Linus Groh <mail@linusgroh.de> 2022-01-11 00:08:58 +0100
commit: e2c9578390ec31519695131e46f140056d39f7fd (patch)
tree: f0fc76ca36aa84b92159b0bc26e1db142acc464c /Kernel
parent: b8d640c3f9320f638161e711cf4fdc678dbef073 (diff)
download: serenity-e2c9578390ec31519695131e46f140056d39f7fd.zip
Kernel: Allow preventing kmalloc and kfree
For "destructive" disallowance of allocations throughout the system, Thread gains a member that controls whether allocations are currently allowed or not. kmalloc checks this member on both allocations and deallocations (with the exception of early boot) and panics the kernel if allocations are disabled. This will allow for critical sections that can't be allowed to allocate to fail-fast, making for easier debugging. PS: My first proper Kernel commit :^)
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Heap/kmalloc.cpp | 10
-rw-r--r--  Kernel/Thread.h         |  4
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 6a3c21211f..5f8800acdb 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -362,8 +362,12 @@ void* kmalloc(size_t size)
Thread* current_thread = Thread::current();
if (!current_thread)
current_thread = Processor::idle_thread();
- if (current_thread)
+ if (current_thread) {
+ // FIXME: By the time we check this, we have already allocated above.
+ // This means that in the case of an infinite recursion, we can't catch it this way.
+ VERIFY(current_thread->is_allocation_enabled());
PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
+ }
return ptr;
}
@@ -384,8 +388,10 @@ void kfree_sized(void* ptr, size_t size)
Thread* current_thread = Thread::current();
if (!current_thread)
current_thread = Processor::idle_thread();
- if (current_thread)
+ if (current_thread) {
+ VERIFY(current_thread->is_allocation_enabled());
PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
+ }
}
g_kmalloc_global->deallocate(ptr, size);
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 21de4a3439..a88a0be927 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1244,6 +1244,9 @@ public:
bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }
+ bool is_allocation_enabled() const { return m_allocation_enabled; }
+ void set_allocation_enabled(bool value) { m_allocation_enabled = value; }
+
String backtrace();
private:
@@ -1348,6 +1351,7 @@ private:
u32 m_lock_requested_count { 0 };
IntrusiveListNode<Thread> m_blocked_threads_list_node;
LockRank m_lock_rank_mask { LockRank::None };
+ bool m_allocation_enabled { true };
#if LOCK_DEBUG
struct HoldingLockInfo {