author     Andreas Kling <kling@serenityos.org>  2021-02-14 09:57:19 +0100
committer  Andreas Kling <kling@serenityos.org>  2021-02-14 10:01:50 +0100
commit     09b1b09c1923c5e82c5500a06c501088ab5ac4ce (patch)
tree       f57307d460c8856604dfa21f3ab27b7a449cd4bc /Kernel/VM
parent     198d64180886e6fad2997513c4c8f68b1338f4e4 (diff)
Kernel: Assert if rounding-up-to-page-size would wrap around to 0
If we try to align a number above 0xfffff000 to the next multiple of the page size (4 KiB), it would wrap around to 0. This is most likely never what we want, so let's assert if that happens.
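As a quick standalone illustration (not part of the commit): with 32-bit FlatPtr values and 4 KiB pages, the old rounding expression wraps to 0 for anything above 0xfffff000. FlatPtr and PAGE_SIZE below are local stand-ins for the kernel's definitions.

#include <cassert>
#include <cstdint>

// Minimal sketch assuming a 32-bit FlatPtr and a 4 KiB page size.
using FlatPtr = uint32_t;
constexpr FlatPtr PAGE_SIZE = 4096;

// The old PAGE_ROUND_UP macro, written as a function for clarity.
constexpr FlatPtr old_page_round_up(FlatPtr x)
{
    return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main()
{
    assert(old_page_round_up(0xfffff000u) == 0xfffff000u); // already aligned, fine
    assert(old_page_round_up(0xfffff001u) == 0);           // wraps around to 0
}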
Diffstat (limited to 'Kernel/VM')
-rw-r--r--  Kernel/VM/InodeVMObject.cpp |  2
-rw-r--r--  Kernel/VM/MemoryManager.cpp |  2
-rw-r--r--  Kernel/VM/MemoryManager.h   | 19
-rw-r--r--  Kernel/VM/Region.cpp        | 12
-rw-r--r--  Kernel/VM/Space.cpp         |  4
-rw-r--r--  Kernel/VM/TypedMapping.h    |  2
6 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp
index 7676fcad08..4f8799142e 100644
--- a/Kernel/VM/InodeVMObject.cpp
+++ b/Kernel/VM/InodeVMObject.cpp
@@ -77,7 +77,7 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new
InterruptDisabler disabler;
- auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
+ auto new_page_count = page_round_up(new_size) / PAGE_SIZE;
m_physical_pages.resize(new_page_count);
m_dirty_pages.grow(new_page_count, false);
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 7e329ef4a6..be32f030bf 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -167,7 +167,7 @@ void MemoryManager::parse_memory_map()
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
+ m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
if (multiboot_info_ptr->flags & 0x4) {
auto* bootmods_start = multiboot_copy_boot_modules_array;
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 53a4cc03a5..b2eba06eae 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -39,8 +39,23 @@
namespace Kernel {
-#define PAGE_ROUND_UP(x) ((((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
-#define PAGE_ROUND_DOWN(x) (((FlatPtr)(x)) & ~(PAGE_SIZE - 1))
+constexpr bool page_round_up_would_wrap(FlatPtr x)
+{
+ return x > 0xfffff000u;
+}
+
+constexpr FlatPtr page_round_up(FlatPtr x)
+{
+ FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
+ // Rounding up >0xfffff000 wraps back to 0. That's never what we want.
+ ASSERT(x == 0 || rounded != 0);
+ return rounded;
+}
+
+constexpr FlatPtr page_round_down(FlatPtr x)
+{
+ return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
+}
inline u32 low_physical_to_virtual(u32 physical)
{
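The new page_round_up_would_wrap() helper lets callers reject oversized values up front rather than hitting the ASSERT inside page_round_up(). A hypothetical caller-side guard (the function name below is illustrative, not from the SerenityOS tree) might look like this:

// Hypothetical caller: refuse lengths that cannot be page-aligned
// instead of asserting inside page_round_up().
bool allocate_rounded(FlatPtr length)
{
    if (page_round_up_would_wrap(length))
        return false;
    FlatPtr rounded_length = page_round_up(length);
    // ... use rounded_length for the actual allocation ...
    return true;
}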
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 94bcb677c2..375acd1127 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -154,8 +154,8 @@ bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
return false;
auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
- size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
- size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
+ size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
+ size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
return is_volatile_range({ first_page_index, last_page_index - first_page_index });
}
@@ -171,16 +171,16 @@ auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, b
// partial pages volatile to prevent potentially non-volatile
// data from being discarded. So round up the first page and round
// down the last page.
- size_t first_page_index = PAGE_ROUND_UP(offset_in_vmobject) / PAGE_SIZE;
- size_t last_page_index = PAGE_ROUND_DOWN(offset_in_vmobject + size) / PAGE_SIZE;
+ size_t first_page_index = page_round_up(offset_in_vmobject) / PAGE_SIZE;
+ size_t last_page_index = page_round_down(offset_in_vmobject + size) / PAGE_SIZE;
if (first_page_index != last_page_index)
add_volatile_range({ first_page_index, last_page_index - first_page_index });
} else {
// If marking pages as non-volatile, round down the first page
// and round up the last page to make sure the beginning and
// end of the range don't inadvertently get discarded.
- size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
- size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
+ size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
+ size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
case PurgeablePageRanges::RemoveVolatileError::Success:
case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:
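Worked example of the rounding direction (values are illustrative, not from the commit): with 0x1000-byte pages, offset_in_vmobject = 0x1800 and size = 0x2000, marking volatile shrinks the range inward to page index 2 only, while marking non-volatile grows it outward to page indices 1 through 3.

#include <cassert>
#include <cstddef>

// Standalone arithmetic check of the rounding above, assuming 4 KiB pages.
constexpr size_t PAGE_SIZE = 0x1000;
constexpr size_t page_round_up(size_t x) { return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }
constexpr size_t page_round_down(size_t x) { return x & ~(PAGE_SIZE - 1); }

int main()
{
    size_t offset = 0x1800, size = 0x2000;
    // Marking volatile: keep only pages fully inside the range.
    assert(page_round_up(offset) / PAGE_SIZE == 2);
    assert(page_round_down(offset + size) / PAGE_SIZE == 3);
    // Marking non-volatile: cover every page the range touches.
    assert(page_round_down(offset) / PAGE_SIZE == 1);
    assert(page_round_up(offset + size) / PAGE_SIZE == 4);
}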
diff --git a/Kernel/VM/Space.cpp b/Kernel/VM/Space.cpp
index 5cb7e28d2e..bf429c240b 100644
--- a/Kernel/VM/Space.cpp
+++ b/Kernel/VM/Space.cpp
@@ -57,7 +57,7 @@ Space::~Space()
Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
{
vaddr.mask(PAGE_MASK);
- size = PAGE_ROUND_UP(size);
+ size = page_round_up(size);
if (vaddr.is_null())
return page_directory().range_allocator().allocate_anywhere(size, alignment);
return page_directory().range_allocator().allocate_specific(vaddr, size);
@@ -137,7 +137,7 @@ Region* Space::find_region_from_range(const Range& range)
if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
return m_region_lookup_cache.region.unsafe_ptr();
- size_t size = PAGE_ROUND_UP(range.size());
+ size_t size = page_round_up(range.size());
for (auto& region : m_regions) {
if (region.vaddr() == range.base() && region.size() == size) {
m_region_lookup_cache.range = range;
diff --git a/Kernel/VM/TypedMapping.h b/Kernel/VM/TypedMapping.h
index c0f8cf98b1..e24976537c 100644
--- a/Kernel/VM/TypedMapping.h
+++ b/Kernel/VM/TypedMapping.h
@@ -47,7 +47,7 @@ template<typename T>
static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, u8 access = Region::Access::Read)
{
TypedMapping<T> table;
- table.region = MM.allocate_kernel_region(paddr.page_base(), PAGE_ROUND_UP(length), {}, access);
+ table.region = MM.allocate_kernel_region(paddr.page_base(), page_round_up(length), {}, access);
table.offset = paddr.offset_in_page();
return table;
}