author    Andreas Kling <awesomekling@gmail.com>    2019-10-10 11:56:57 +0200
committer Andreas Kling <awesomekling@gmail.com>    2019-10-10 11:58:15 +0200
commit    a6e4c504e2a06bf25ca14e239ff942ae89935a05 (patch)
tree      a5a24a2c3d0d1f30028ed3b0b100f6a0e96c0225
parent    ebacef36ee8639b03e8bd8096cfa54676f2e3432 (diff)
Kernel: Make SlabAllocator fall back to kmalloc() when slabs run out
This is obviously not ideal, and it would be better to teach it how to allocate more pages, etc. But since the physical page allocator itself currently uses SlabAllocator, it's a little bit tricky :^)
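
For illustration only (not part of the commit): a minimal userspace sketch of the fallback pattern the patch introduces. It is not the kernel code itself; malloc()/free() stand in for kmalloc()/kfree(), the InterruptDisabler and allocation counters are omitted, and the DemoSlabAllocator name and the sizes in main() are made up for this example. The key idea is the same: the slab pool is one contiguous region, so any pointer outside [m_base, m_end) can only have come from the fallback heap path and must be released there instead of being pushed onto the freelist.

// Sketch, not the SerenityOS implementation. malloc()/free() stand in for
// kmalloc()/kfree(); DemoSlabAllocator is a hypothetical name for this example.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

template<size_t slab_size>
class DemoSlabAllocator {
    struct FreeSlab {
        FreeSlab* next;
    };
    static_assert(sizeof(FreeSlab) <= slab_size, "a slab must be able to hold a freelist link");

public:
    void init(size_t pool_size)
    {
        // One contiguous pool; every slab pointer lies in [m_base, m_end).
        m_base = malloc(pool_size);
        m_end = (uint8_t*)m_base + pool_size;
        auto* slabs = (FreeSlab*)m_base;
        size_t slab_count = pool_size / slab_size;
        // Thread the slabs into a singly linked freelist.
        for (size_t i = 1; i < slab_count; ++i)
            slabs[i].next = &slabs[i - 1];
        slabs[0].next = nullptr;
        m_freelist = &slabs[slab_count - 1];
    }

    void* alloc()
    {
        // Fallback: when the pool is exhausted, hand out a heap allocation.
        if (!m_freelist)
            return malloc(slab_size);
        void* ptr = m_freelist;
        m_freelist = m_freelist->next;
        return ptr;
    }

    void dealloc(void* ptr)
    {
        assert(ptr);
        // Same bounds check as the patch: pointers outside the pool must have
        // come from the fallback path, so hand them back to the heap.
        if (ptr < m_base || ptr >= m_end) {
            free(ptr);
            return;
        }
        ((FreeSlab*)ptr)->next = m_freelist;
        m_freelist = (FreeSlab*)ptr;
    }

private:
    FreeSlab* m_freelist { nullptr };
    void* m_base { nullptr };
    void* m_end { nullptr };
};

int main()
{
    DemoSlabAllocator<32> allocator;
    allocator.init(32 * 4); // room for only four slabs
    void* p[6];
    for (auto& q : p)
        q = allocator.alloc(); // the last two come from the fallback
    for (auto& q : p)
        allocator.dealloc(q);  // the bounds check routes each pointer correctly
}
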
-rw-r--r--   Kernel/Heap/SlabAllocator.cpp   13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
index 6b886db4fc..39b346b376 100644
--- a/Kernel/Heap/SlabAllocator.cpp
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -10,8 +10,9 @@ public:
     void init(size_t size)
     {
-        void* base = kmalloc_eternal(size);
-        FreeSlab* slabs = (FreeSlab*)base;
+        m_base = kmalloc_eternal(size);
+        m_end = (u8*)m_base + size;
+        FreeSlab* slabs = (FreeSlab*)m_base;
         size_t slab_count = size / templated_slab_size;
         for (size_t i = 1; i < slab_count; ++i) {
             slabs[i].next = &slabs[i - 1];
@@ -27,6 +28,8 @@ public:
     void* alloc()
     {
         InterruptDisabler disabler;
+        if (!m_freelist)
+            return kmalloc(slab_size());
         ASSERT(m_freelist);
         void* ptr = m_freelist;
         m_freelist = m_freelist->next;
@@ -39,6 +42,10 @@ public:
     {
         InterruptDisabler disabler;
         ASSERT(ptr);
+        if (ptr < m_base || ptr >= m_end) {
+            kfree(ptr);
+            return;
+        }
         ((FreeSlab*)ptr)->next = m_freelist;
         m_freelist = (FreeSlab*)ptr;
         ++m_num_allocated;
@@ -57,6 +64,8 @@ private:
     FreeSlab* m_freelist { nullptr };
     size_t m_num_allocated { 0 };
     size_t m_num_free { 0 };
+    void* m_base { nullptr };
+    void* m_end { nullptr };
     static_assert(sizeof(FreeSlab) == templated_slab_size);
 };