author     Andreas Kling <kling@serenityos.org>  2021-01-29 12:06:47 +0100
committer  Andreas Kling <kling@serenityos.org>  2021-01-29 12:11:42 +0100
commit     c8e7baf4b8d9da51e925d029254aaf3c8ed8c5e4 (patch)
tree       f5b185df6f35723d101f00d8dfc83128ab107048 /Kernel/VM/RangeAllocator.cpp
parent     e2abf615b7cec26168adc80e9d2ce290f3539db4 (diff)
download   serenity-c8e7baf4b8d9da51e925d029254aaf3c8ed8c5e4.zip
Kernel: Check for alignment size overflow when allocating VM ranges
Also add some sanity check assertions that we're generating and returning ranges contained within the RangeAllocator's total range. Fixes #5162.
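
For context, both overflow guards in this commit follow the same pattern: before computing size + PAGE_SIZE * 2 (and later effective_size + alignment), first ask whether the unsigned addition would wrap. A minimal standalone sketch of that pattern, using a plain C++ stand-in rather than AK::Checked (the helper and function names below are hypothetical, for illustration only):

    #include <cstddef>
    #include <cstdint>
    #include <optional>

    // Hypothetical stand-in for AK::Checked<size_t>::addition_would_overflow():
    // true if a + b would wrap around SIZE_MAX.
    static bool addition_would_overflow(size_t a, size_t b)
    {
        return a > SIZE_MAX - b;
    }

    // Hypothetical caller mirroring the shape of allocate_anywhere():
    // refuse the request instead of computing a wrapped effective size.
    std::optional<size_t> effective_allocation_size(size_t size, size_t page_size)
    {
        if (addition_would_overflow(size, page_size * 2))
            return {};
        return size + page_size * 2; // one guard page on each side
    }

With a 4 KiB page size, a request of SIZE_MAX - 4096 bytes would previously wrap around to a tiny effective size; with the guard in place it is rejected up front.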
Diffstat (limited to 'Kernel/VM/RangeAllocator.cpp')
-rw-r--r--  Kernel/VM/RangeAllocator.cpp  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index 743ecf5439..35a4a783cc 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -25,6 +25,7 @@
*/
#include <AK/BinarySearch.h>
+#include <AK/Checked.h>
#include <AK/QuickSort.h>
#include <Kernel/Debug.h>
#include <Kernel/Random.h>
@@ -85,9 +86,12 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
ASSERT(m_lock.is_locked());
auto remaining_parts = m_available_ranges[index].carve(range);
ASSERT(remaining_parts.size() >= 1);
+ ASSERT(m_total_range.contains(remaining_parts[0]));
m_available_ranges[index] = remaining_parts[0];
- if (remaining_parts.size() == 2)
+ if (remaining_parts.size() == 2) {
+ ASSERT(m_total_range.contains(remaining_parts[1]));
m_available_ranges.insert(index + 1, move(remaining_parts[1]));
+ }
}
Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
@@ -125,6 +129,9 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
#ifdef VM_GUARD_PAGES
// NOTE: We pad VM allocations with a guard page on each side.
+ if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
+ return {};
+
size_t effective_size = size + PAGE_SIZE * 2;
size_t offset_from_effective_base = PAGE_SIZE;
#else
@@ -132,6 +139,9 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
size_t offset_from_effective_base = 0;
#endif
+ if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
+ return {};
+
ScopedSpinLock lock(m_lock);
for (size_t i = 0; i < m_available_ranges.size(); ++i) {
auto& available_range = m_available_ranges[i];
@@ -143,6 +153,8 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
Range allocated_range(VirtualAddress(aligned_base), size);
+ ASSERT(m_total_range.contains(allocated_range));
+
if (available_range == allocated_range) {
m_available_ranges.remove(i);
return allocated_range;
@@ -166,6 +178,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
ScopedSpinLock lock(m_lock);
for (size_t i = 0; i < m_available_ranges.size(); ++i) {
auto& available_range = m_available_ranges[i];
+ ASSERT(m_total_range.contains(allocated_range));
if (!available_range.contains(base, size))
continue;
if (available_range == allocated_range) {
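
The ASSERTs added above all enforce the same invariant: every range that the allocator keeps in m_available_ranges or hands back to a caller must lie entirely within m_total_range. A minimal sketch of such a containment check, with a hypothetical simplified type standing in for the kernel's Range class:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical simplified stand-in for Kernel::Range.
    struct SimpleRange {
        uintptr_t base { 0 };
        size_t size { 0 };

        uintptr_t end() const { return base + size; }

        // True if `other` lies entirely within this range.
        bool contains(const SimpleRange& other) const
        {
            return other.base >= base && other.end() <= end();
        }
    };

Asserting this invariant right after carving a range and right after computing an aligned base catches arithmetic bugs (such as the overflow fixed here) close to where they happen, rather than letting an out-of-bounds range escape into the rest of the VM layer.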