author     Tom <tomut@yahoo.com>                   2020-11-01 13:11:31 -0700
committer  Andreas Kling <kling@serenityos.org>    2020-11-04 21:21:37 +0100
commit     28b109688bf279f34a96bd9c702b69e11269c59c (patch)
tree       94f330e2bea2b3278bdf161b833d5012a845c35f /Kernel/Heap
parent     b9a97ff81fbcadacf001472a0f71306d67d56eb8 (diff)
Kernel: Defer kmalloc heap contraction
Because allocating or freeing regions may require locks that have to wait on other processors to complete, heap contraction must be deferred until it is safe to do so. Otherwise we can deadlock, because we are still holding the global heap lock.
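
The pattern is easiest to see outside the kernel. Below is a minimal sketch of the defer-free idea, assuming hypothetical stand-ins: Region for the kernel's VM region type, g_heap_lock (a std::mutex) for s_lock, and a plain queue for the per-processor deferred-call mechanism. This illustrates the technique; it is not the kernel's implementation.

// Minimal sketch of the defer-free pattern (assumed names; not SerenityOS code).
#include <memory>
#include <mutex>
#include <queue>

struct Region { }; // stands in for a kernel VM region

static std::mutex g_heap_lock;                         // analogous to s_lock
static std::unique_ptr<Region> g_backup_memory;        // analogous to m_backup_memory
static std::queue<std::unique_ptr<Region>> g_deferred; // analogous to the deferred-call queue

// Called with g_heap_lock held. Actually freeing the region here could take
// other locks that wait on other processors, so we only queue it for later.
void remove_memory_locked(std::unique_ptr<Region> region)
{
    if (!g_backup_memory) {
        g_backup_memory = std::move(region); // reuse immediately as backup
        return;
    }
    g_deferred.push(std::move(region)); // defer; nothing else to do right now
}

// Runs later, once the caller has dropped g_heap_lock (in the kernel this
// happens via the per-processor deferred-call mechanism).
void process_deferred()
{
    while (!g_deferred.empty()) {
        auto region = std::move(g_deferred.front());
        g_deferred.pop();
        std::scoped_lock lock(g_heap_lock); // safe: not already held here
        if (!g_backup_memory)
            g_backup_memory = std::move(region); // promote to new backup
        // otherwise the region is destroyed here, outside the original
        // critical section
    }
}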
Diffstat (limited to 'Kernel/Heap')
-rw-r--r--  Kernel/Heap/kmalloc.cpp  22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index ecbbda1114..bb46edc4f4 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -50,6 +50,8 @@
 #define POOL_SIZE (2 * MiB)
 #define ETERNAL_RANGE_SIZE (2 * MiB)
 
+static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
+
 struct KmallocGlobalHeap {
     struct ExpandGlobalHeap {
         KmallocGlobalHeap& m_global_heap;
@@ -125,10 +127,26 @@ struct KmallocGlobalHeap {
             for (size_t i = 0; i < m_global_heap.m_subheap_memory.size(); i++) {
                 if (m_global_heap.m_subheap_memory[i].vaddr().as_ptr() == memory) {
                     auto region = m_global_heap.m_subheap_memory.take(i);
-                    klog() << "kmalloc(): Removing memory from heap at " << region->vaddr() << ", bytes: " << region->size();
                     if (!m_global_heap.m_backup_memory) {
                         klog() << "kmalloc(): Using removed memory as backup: " << region->vaddr() << ", bytes: " << region->size();
                         m_global_heap.m_backup_memory = move(region);
+                    } else {
+                        klog() << "kmalloc(): Queue removing memory from heap at " << region->vaddr() << ", bytes: " << region->size();
+                        Processor::deferred_call_queue([this, region = move(region)]() mutable {
+                            // We need to defer freeing the region to prevent a potential
+                            // deadlock, since we are still holding the kmalloc lock.
+                            // We don't really need to do anything other than hold
+                            // onto the region, unless we already used the backup
+                            // memory, in which case we want to use the region as the
+                            // new backup.
+                            ScopedSpinLock lock(s_lock);
+                            if (!m_global_heap.m_backup_memory) {
+                                klog() << "kmalloc(): Queued memory region at " << region->vaddr() << ", bytes: " << region->size() << " will be used as new backup";
+                                m_global_heap.m_backup_memory = move(region);
+                            } else {
+                                klog() << "kmalloc(): Queued memory region at " << region->vaddr() << ", bytes: " << region->size() << " will be freed now";
+                            }
+                        });
                     }
                     return true;
                 }
@@ -179,8 +197,6 @@ bool g_dump_kmalloc_stacks;
 static u8* s_next_eternal_ptr;
 static u8* s_end_of_eternal_range;
 
-static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
-
 void kmalloc_enable_expand()
 {
     g_kmalloc_global->allocate_backup_memory();
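
A side note on the moved s_lock declaration: its comment says the spinlock needs to be recursive because of dump_backtrace(), presumably because kmalloc can dump a backtrace (see g_dump_kmalloc_stacks in the hunk above) while already holding the lock, and that path re-enters the allocator. A minimal sketch of why a non-recursive lock would deadlock there, using std::recursive_mutex and hypothetical names (not SerenityOS code):

// Sketch: a recursive lock survives allocator re-entry from backtrace dumping.
#include <cstddef>
#include <mutex>

static std::recursive_mutex s_lock; // stands in for RecursiveSpinLock
static bool g_dump_stacks = true;   // analogous to g_dump_kmalloc_stacks

void dump_backtrace_sketch(); // assumed to allocate, re-entering kmalloc_sketch()

void* kmalloc_sketch(std::size_t size)
{
    (void)size;
    std::scoped_lock lock(s_lock); // re-entrant: same thread may lock twice
    if (g_dump_stacks)
        dump_backtrace_sketch(); // with a non-recursive lock this would deadlock
    return nullptr; // allocation elided in this sketch
}

void dump_backtrace_sketch()
{
    g_dump_stacks = false; // keep the sketch's recursion bounded
    kmalloc_sketch(16);    // e.g. symbolication needs a small allocation
}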