author    Andreas Kling <kling@serenityos.org>  2021-02-14 09:57:19 +0100
committer Andreas Kling <kling@serenityos.org>  2021-02-14 10:01:50 +0100
commit    09b1b09c1923c5e82c5500a06c501088ab5ac4ce (patch)
tree      f57307d460c8856604dfa21f3ab27b7a449cd4bc /Kernel/KBuffer.h
parent    198d64180886e6fad2997513c4c8f68b1338f4e4 (diff)
Kernel: Assert if rounding-up-to-page-size would wrap around to 0
If we try to align a number above 0xfffff000 to the next multiple of the page size (4 KiB), it wraps around to 0. This is almost certainly never what we want, so let's assert if that happens.
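
For illustration, here is a minimal sketch of what a checked page_round_up can look like. This is not the kernel's actual implementation (which this page does not show); PAGE_SIZE and the use of assert() stand in for the kernel's own definitions and its VERIFY-style assertion macro:

    #include <cassert>
    #include <cstddef>

    static constexpr size_t PAGE_SIZE = 4096; // 4 KiB pages, per the commit message

    // Round `size` up to the next multiple of PAGE_SIZE. On a 32-bit size_t,
    // any input above 0xfffff000 would wrap the sum past 0 and round to 0,
    // so assert instead of returning a bogus zero-size result.
    inline size_t page_round_up(size_t size)
    {
        size_t rounded = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        assert(rounded >= size); // fires if the addition wrapped around
        return rounded;
    }
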
Diffstat (limited to 'Kernel/KBuffer.h')
-rw-r--r--  Kernel/KBuffer.h  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/Kernel/KBuffer.h b/Kernel/KBuffer.h
index 031c154308..b65c2b1d24 100644
--- a/Kernel/KBuffer.h
+++ b/Kernel/KBuffer.h
@@ -50,7 +50,7 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
public:
static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
- auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, strategy);
+ auto region = MM.allocate_kernel_region(page_round_up(size), name, access, strategy);
if (!region)
return nullptr;
return adopt(*new KBufferImpl(region.release_nonnull(), size, strategy));
@@ -58,7 +58,7 @@ public:
static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
- auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, strategy);
+ auto region = MM.allocate_kernel_region(page_round_up(bytes.size()), name, access, strategy);
if (!region)
return nullptr;
memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
@@ -81,7 +81,7 @@ public:
bool expand(size_t new_capacity)
{
- auto new_region = MM.allocate_kernel_region(PAGE_ROUND_UP(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
+ auto new_region = MM.allocate_kernel_region(page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
if (!new_region)
return false;
if (m_region && m_size > 0)
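
To see the failure mode the new assertion guards against, consider the old unchecked rounding with 32-bit arithmetic. A standalone repro (a sketch using plain C++ arithmetic, not kernel code):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Old behavior: rounding a size above 0xfffff000 wraps past 2^32.
        uint32_t size = 0xfffff001;
        uint32_t rounded = (size + 0xfffu) & ~0xfffu; // sum wraps to 0
        printf("%#x rounds to %#x\n", (unsigned)size, (unsigned)rounded);
        // Prints: 0xfffff001 rounds to 0
        return 0;
    }

With an asserting page_round_up like the sketch above, such a request aborts at the call site instead of handing allocate_kernel_region() a zero-byte size.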