author    Tom <tomut@yahoo.com>  2020-07-04 16:00:57 -0600
committer Andreas Kling <kling@serenityos.org>  2020-07-06 10:00:24 +0200
commit    49f5069b769958274c9ab0e653679ed8eb62a01f (patch)
tree      e4af3a93c512ef5726ed51ea0abd5f379b633470 /Kernel
parent    788b2d64c66a0741717b638a03baead77856268e (diff)
Kernel: Add a SpinLock to the WaitQueue
We need to be able to prevent a WaitQueue from being modified by another CPU. So, add a SpinLock to it. Because this pushes some other class over the 64-byte limit, we also need to add a 128-byte bucket to the slab allocator.
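Below is a minimal standalone sketch of the locking pattern this commit adopts, using std::atomic_flag and std::deque as stand-ins for the Kernel's SpinLock<u32>, ScopedSpinLock and IntrusiveList; the class and member names here are illustrative only, not the actual SerenityOS API.

#include <atomic>
#include <deque>
#include <iostream>

// Stand-in for Kernel's SpinLock<u32>: busy-waits until the flag is acquired,
// which is what keeps a second CPU from mutating the queue concurrently.
class SpinLock {
public:
    void lock()
    {
        while (m_flag.test_and_set(std::memory_order_acquire)) { }
    }
    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

// Stand-in for Kernel's ScopedSpinLock: RAII guard so the lock is released on
// every return path out of a queue operation.
class ScopedSpinLock {
public:
    explicit ScopedSpinLock(SpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedSpinLock() { m_lock.unlock(); }

private:
    SpinLock& m_lock;
};

struct Thread {
    int id;
};

class WaitQueue {
public:
    void enqueue(Thread& thread)
    {
        ScopedSpinLock queue_lock(m_lock); // held for the whole mutation
        m_threads.push_back(&thread);
    }

    Thread* wake_one()
    {
        ScopedSpinLock queue_lock(m_lock);
        if (m_threads.empty())
            return nullptr;
        Thread* thread = m_threads.front();
        m_threads.pop_front();
        return thread;
    }

private:
    SpinLock m_lock;
    std::deque<Thread*> m_threads; // stand-in for the intrusive thread list
};

int main()
{
    WaitQueue queue;
    Thread a { 1 }, b { 2 };
    queue.enqueue(a);
    queue.enqueue(b);
    if (auto* thread = queue.wake_one())
        std::cout << "woke thread " << thread->id << "\n";
}

The RAII guard releases the lock on every return path, so each queue operation completes atomically with respect to other CPUs, which a ScopedCritical (interrupt disable on the local CPU only) could not guarantee.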
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Heap/SlabAllocator.cpp   6
-rw-r--r--  Kernel/WaitQueue.cpp           10
-rw-r--r--  Kernel/WaitQueue.h              2
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
index c27b86857e..b3ba8c1cce 100644
--- a/Kernel/Heap/SlabAllocator.cpp
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -114,6 +114,7 @@ private:
static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<64> s_slab_allocator_64;
+static SlabAllocator<128> s_slab_allocator_128;
static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());
@@ -130,6 +131,7 @@ void slab_alloc_init()
s_slab_allocator_16.init(128 * KB);
s_slab_allocator_32.init(128 * KB);
s_slab_allocator_64.init(512 * KB);
+ s_slab_allocator_128.init(512 * KB);
}
void* slab_alloc(size_t slab_size)
@@ -140,6 +142,8 @@ void* slab_alloc(size_t slab_size)
return s_slab_allocator_32.alloc();
if (slab_size <= 64)
return s_slab_allocator_64.alloc();
+ if (slab_size <= 128)
+ return s_slab_allocator_128.alloc();
ASSERT_NOT_REACHED();
}
@@ -151,6 +155,8 @@ void slab_dealloc(void* ptr, size_t slab_size)
return s_slab_allocator_32.dealloc(ptr);
if (slab_size <= 64)
return s_slab_allocator_64.dealloc(ptr);
+ if (slab_size <= 128)
+ return s_slab_allocator_128.dealloc(ptr);
ASSERT_NOT_REACHED();
}
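For context on the allocator change above, here is a hypothetical standalone sketch of the size-class dispatch it extends: a request is routed to the smallest bucket that can hold it, so any object that grows past 64 bytes (as the commit message describes) now needs the new 128-byte bucket. The function and program below are illustrative only, not kernel code.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Route an allocation request to the smallest slab bucket that can hold it.
static size_t pick_bucket(size_t size)
{
    if (size <= 16)
        return 16;
    if (size <= 32)
        return 32;
    if (size <= 64)
        return 64;
    if (size <= 128)
        return 128; // the bucket this commit adds
    assert(false && "no slab bucket large enough");
    return 0;
}

int main()
{
    // A 72-byte object (e.g. a class that grew past 64 bytes after gaining a
    // lock member) no longer fits the 64-byte bucket.
    std::printf("72-byte allocation -> %zu-byte bucket\n", pick_bucket(72));
}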
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index c5122f64f3..77ec3caec2 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
void WaitQueue::enqueue(Thread& thread)
{
- ScopedCritical critical;
+ ScopedSpinLock queue_lock(m_lock);
m_threads.append(thread);
}
void WaitQueue::wake_one(Atomic<bool>* lock)
{
- ScopedCritical critical;
+ ScopedSpinLock queue_lock(m_lock);
if (lock)
*lock = false;
if (m_threads.is_empty())
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
void WaitQueue::wake_n(i32 wake_count)
{
- ScopedCritical critical;
+ ScopedSpinLock queue_lock(m_lock);
if (m_threads.is_empty())
return;
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
void WaitQueue::wake_all()
{
- ScopedCritical critical;
+ ScopedSpinLock queue_lock(m_lock);
if (m_threads.is_empty())
return;
while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
void WaitQueue::clear()
{
- ScopedCritical critical;
+ ScopedSpinLock queue_lock(m_lock);
m_threads.clear();
}
diff --git a/Kernel/WaitQueue.h b/Kernel/WaitQueue.h
index c4beae4617..c7705880bc 100644
--- a/Kernel/WaitQueue.h
+++ b/Kernel/WaitQueue.h
@@ -28,6 +28,7 @@
#include <AK/Atomic.h>
#include <AK/SinglyLinkedList.h>
+#include <Kernel/SpinLock.h>
#include <Kernel/Thread.h>
namespace Kernel {
@@ -46,6 +47,7 @@ public:
private:
typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
ThreadList m_threads;
+ SpinLock<u32> m_lock;
};
}