author     Andreas Kling <awesomekling@gmail.com>  2019-10-02 13:35:30 +0200
committer  Andreas Kling <awesomekling@gmail.com>  2019-10-02 13:47:40 +0200
commit     d553bae749f48aab6c28cf8ad4a5b7624561738b (patch)
tree       e72c47608998616d759700ce2c2e29eba55683d2 /Kernel/Heap/SlabAllocator.cpp
parent     8f842375a2ec816ed35a90bd93a7ecc3e544004d (diff)
download   serenity-d553bae749f48aab6c28cf8ad4a5b7624561738b.zip
Kernel: Allocate more 8-byte slabs than anything else
We need these for PhysicalPage objects. Ultimately I'd like to get rid of these objects entirely, but while we still have to deal with them, let's at least handle large demand a bit better.
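For context, a slab allocator in this style pre-carves a fixed backing region into equal-sized chunks at init() time and threads a free list through them, so each size class's capacity is fixed up front; that is why the 8-byte class needs a bigger region rather than growing on demand. Below is a minimal illustrative sketch, assuming a malloc-backed region and an intrusive free list; the method names echo the kernel's API shape, but the real SlabAllocator maps kernel pages and differs in detail.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Illustrative fixed-size slab allocator (a hedged sketch, not the
// kernel's actual implementation).
class SlabAllocator {
public:
    void init(size_t region_size, size_t slab_size)
    {
        auto* base = static_cast<uint8_t*>(malloc(region_size)); // kernel would map pages instead
        // Carve the region into slab_size chunks and thread a free list
        // through them; capacity is fixed from this point on.
        for (size_t offset = 0; offset + slab_size <= region_size; offset += slab_size)
            dealloc(base + offset);
    }

    void* alloc()
    {
        if (!m_freelist)
            return nullptr; // region exhausted; a kernel might panic or fall back
        auto* slab = m_freelist;
        m_freelist = slab->next;
        return slab;
    }

    void dealloc(void* ptr)
    {
        auto* slab = static_cast<FreeSlab*>(ptr);
        slab->next = m_freelist;
        m_freelist = slab;
    }

private:
    struct FreeSlab {
        FreeSlab* next;
    };
    FreeSlab* m_freelist { nullptr };
};

int main()
{
    constexpr size_t KB = 1024;
    SlabAllocator s_slab_allocator_8;
    // Mirrors the patch: the 8-byte size class gets 384 KiB while the other
    // classes keep 128 KiB, because each PhysicalPage object occupies one
    // 8-byte slab and a machine has a lot of physical pages.
    s_slab_allocator_8.init(384 * KB, 8);

    void* a = s_slab_allocator_8.alloc();
    void* b = s_slab_allocator_8.alloc();
    assert(a && b && a != b);
    s_slab_allocator_8.dealloc(a);
    s_slab_allocator_8.dealloc(b);
    return 0;
}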
Diffstat (limited to 'Kernel/Heap/SlabAllocator.cpp')
-rw-r--r--  Kernel/Heap/SlabAllocator.cpp  7
1 file changed, 4 insertions, 3 deletions
diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
index 23f735ec02..6b886db4fc 100644
--- a/Kernel/Heap/SlabAllocator.cpp
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -79,9 +79,10 @@ void for_each_allocator(Callback callback)
 void slab_alloc_init()
 {
-    for_each_allocator([&](auto& allocator) {
-        allocator.init(128 * KB);
-    });
+    s_slab_allocator_8.init(384 * KB);
+    s_slab_allocator_16.init(128 * KB);
+    s_slab_allocator_32.init(128 * KB);
+    s_slab_allocator_48.init(128 * KB);
 }
 
 void* slab_alloc(size_t slab_size)
 {
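
The listing cuts off at slab_alloc(). Given the four size classes set up in slab_alloc_init(), it presumably routes each request to the smallest class that fits. The sketch below reuses the illustrative SlabAllocator class from above; the declarations and the out-of-range fallback are assumptions for the example, not the file's actual code.

// Hedged sketch of the size-class dispatch slab_alloc() presumably performs.
static SlabAllocator s_slab_allocator_8;   // hypothetical declarations,
static SlabAllocator s_slab_allocator_16;  // mirroring the names in the diff
static SlabAllocator s_slab_allocator_32;
static SlabAllocator s_slab_allocator_48;

void* slab_alloc(size_t slab_size)
{
    // Route the request to the smallest size class that can hold it.
    if (slab_size <= 8)
        return s_slab_allocator_8.alloc();
    if (slab_size <= 16)
        return s_slab_allocator_16.alloc();
    if (slab_size <= 32)
        return s_slab_allocator_32.alloc();
    if (slab_size <= 48)
        return s_slab_allocator_48.alloc();
    return nullptr; // sizes beyond the largest class would be served elsewhere
}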