author     Tom <tomut@yahoo.com>                 2020-10-31 17:12:23 -0600
committer  Andreas Kling <kling@serenityos.org>  2020-11-11 12:27:25 +0100
commit  2b25a89ab5625b1dd04b10f76b447177fd294f53 (patch)
tree    2ea23c9b19a9164f5de99eb9795c6c69de4892fb /Kernel
parent  66f46d03e4aeb5fe77098db73f275ddab3cfde8c (diff)
Kernel: Add locks around RangeAllocator
We need to keep multiple processors from changing it at the same time.
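For readers unfamiliar with the primitive used below: ScopedSpinLock is an RAII guard whose constructor spins until it acquires the lock and whose destructor releases it, so the lock is dropped on every return path. A minimal sketch of those semantics, using std::atomic<bool> as a stand-in for the kernel's SpinLock<u8> (these classes are illustrative, not the actual implementation in Kernel/SpinLock.h):

#include <atomic>

// Illustrative stand-in for the kernel's SpinLock<u8>.
class SpinLock {
public:
    void lock()
    {
        // Busy-wait until we atomically flip the flag from false to true.
        while (m_locked.exchange(true, std::memory_order_acquire)) {
            // Another processor holds the lock; keep spinning.
        }
    }
    void unlock() { m_locked.store(false, std::memory_order_release); }
    bool is_locked() const { return m_locked.load(std::memory_order_relaxed); }

private:
    std::atomic<bool> m_locked { false };
};

// RAII guard: acquire in the constructor, release in the destructor,
// so every exit path out of a scope drops the lock.
class ScopedSpinLock {
public:
    explicit ScopedSpinLock(SpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedSpinLock() { m_lock.unlock(); }

private:
    SpinLock& m_lock;
};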
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/VM/RangeAllocator.cpp  7
-rw-r--r--  Kernel/VM/RangeAllocator.h    3
2 files changed, 10 insertions, 0 deletions
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index e82fbdcffa..932a03fc88 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -44,12 +44,14 @@ void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
     m_total_range = { base, size };
     m_available_ranges.append({ base, size });
 #ifdef VRA_DEBUG
+    ScopedSpinLock lock(m_lock);
     dump();
 #endif
 }
 
 void RangeAllocator::initialize_from_parent(const RangeAllocator& parent_allocator)
 {
+    ScopedSpinLock lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;
     m_available_ranges = parent_allocator.m_available_ranges;
 }
@@ -60,6 +62,7 @@ RangeAllocator::~RangeAllocator()
 
 void RangeAllocator::dump() const
 {
+    ASSERT(m_lock.is_locked());
     dbg() << "RangeAllocator{" << this << "}";
     for (auto& range : m_available_ranges) {
         dbg() << "    " << String::format("%x", range.base().get()) << " -> " << String::format("%x", range.end().get() - 1);
@@ -85,6 +88,7 @@ Vector<Range, 2> Range::carve(const Range& taken)
 
 void RangeAllocator::carve_at_index(int index, const Range& range)
 {
+    ASSERT(m_lock.is_locked());
     auto remaining_parts = m_available_ranges[index].carve(range);
     ASSERT(remaining_parts.size() >= 1);
     m_available_ranges[index] = remaining_parts[0];
@@ -106,6 +110,7 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     size_t offset_from_effective_base = 0;
 #endif
 
+    ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
         // FIXME: This check is probably excluding some valid candidates when using a large alignment.
@@ -140,6 +145,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
         return {};
 
     Range allocated_range(base, size);
+    ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
         if (!available_range.contains(base, size))
@@ -161,6 +167,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 
 void RangeAllocator::deallocate(Range range)
 {
+    ScopedSpinLock lock(m_lock);
     ASSERT(m_total_range.contains(range));
     ASSERT(range.size());
     ASSERT(range.base() < range.end());
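Note the convention in the hunks above: the public entry points (initialize_from_parent, allocate_anywhere, allocate_specific, deallocate) acquire the lock themselves with ScopedSpinLock, while helpers that are only reached with the lock already held (dump, carve_at_index) merely assert that it is held. Re-acquiring a non-recursive spinlock from inside those helpers would deadlock. A hedged sketch of the pattern, reusing the illustrative SpinLock/ScopedSpinLock above and plain assert() in place of the kernel's ASSERT macro (the Allocator class here is hypothetical):

#include <cassert>

class Allocator {
public:
    void allocate()
    {
        ScopedSpinLock lock(m_lock); // Entry point: acquire exactly once.
        carve();                     // Helper runs with the lock held.
    }

private:
    void carve()
    {
        // Taking m_lock again here would deadlock, so the helper only
        // checks that some caller already holds it.
        assert(m_lock.is_locked());
        // ... mutate the shared range list ...
    }

    SpinLock m_lock;
};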
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
index a5fe6ad893..964139c475 100644
--- a/Kernel/VM/RangeAllocator.h
+++ b/Kernel/VM/RangeAllocator.h
@@ -29,6 +29,7 @@
 #include <AK/String.h>
 #include <AK/Traits.h>
 #include <AK/Vector.h>
+#include <Kernel/SpinLock.h>
 #include <Kernel/VirtualAddress.h>
 
 namespace Kernel {
@@ -92,6 +93,7 @@ public:
 
     bool contains(const Range& range) const
     {
+        ScopedSpinLock lock(m_lock);
         return m_total_range.contains(range);
     }
@@ -100,6 +102,7 @@ private:
     Vector<Range> m_available_ranges;
     Range m_total_range;
+    mutable SpinLock<u8> m_lock;
 };
 
 inline const LogStream& operator<<(const LogStream& stream, const Range& value)
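Finally, m_lock is declared mutable because const member functions such as contains() and dump() still need to acquire or inspect it: locking mutates the lock word even though the allocator's logical state is unchanged. A small sketch of why mutable is required, reusing the illustrative stand-ins above (the Example class is hypothetical):

class Example {
public:
    bool contains() const
    {
        // Compiles only because m_lock is mutable; a const member
        // function may not modify non-mutable members.
        ScopedSpinLock lock(m_lock);
        return m_flag;
    }

private:
    bool m_flag { true };
    mutable SpinLock m_lock;
};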