author    Liav A <liavalb@gmail.com>  2021-10-22 11:59:31 +0300
committer Andreas Kling <kling@serenityos.org>  2021-10-22 13:13:00 +0200
commit    026687816d65a28bd3a553c7a343d8b3421a9483 (patch)
tree      66ee4ad8444a865f6a65a784a6b51f93cdf539be /Kernel
parent    89afd4d063b79ef2eba81d1bfc7857c388679c5e (diff)
Kernel: Fix restrictions in is_allowed_to_mmap_to_userspace function
This small change simplifies the function a bit, but it also fixes a problem with it. Let's take an example to see this: say we have a reserved range between 0xe0000 and 0xfffff (the EBDA), and we want to map the entire EBDA from the memory device (/dev/mem) into a program. If the program tries to map more than 131072 bytes, the current logic works - the start address is 0xe0000, which is of course within the reserved range, so it passes the first two restrictions, and the third if statement then rejects the request because it asks for more than the allowed number of bytes.

However, consider another scenario where we try to mmap from 0xf0000 with a length that is less than 131072 bytes but more than 65536. In that case we again pass the first two if statements, but the third one passes too, because it doesn't take into account the offset of the start address from the beginning of the reserved range (0xe0000). As a result, a user can easily mmap 65535 bytes above 0x100000.

This might seem negligible; however, it's still a severe bug that could theoretically be exploited into an info leak or into tampering with important kernel structures.
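To make the scenario above concrete, here is a minimal, self-contained sketch. It is not SerenityOS code: it models PhysicalAddress and the reserved-range list with plain integers and a single hard-coded range standing in for the EBDA, and the names old_check/new_check are made up for illustration. It shows the old third restriction accepting a mapping that spills past the end of the reserved range, while the fixed bounds check rejects it.

// Illustrative sketch only - not kernel code.
#include <cstdint>
#include <cstdio>

struct ReservedRange {
    uint64_t start;
    uint64_t length;
};

// Hard-coded stand-in for the EBDA reserved range: 0xe0000-0xfffff, 131072 bytes.
static const ReservedRange ebda { 0xe0000, 0x20000 };

// Old logic: compares the requested size only against the reserved range's
// total length, ignoring where inside the range the mapping starts.
static bool old_check(uint64_t start, uint64_t size)
{
    if (!(ebda.start <= start))
        return false;
    if (!(ebda.start + ebda.length > start))
        return false;
    if (ebda.length < size)
        return false;
    return true;
}

// New logic: computes the end of the requested mapping (after an overflow
// guard) and requires the whole [start, end) span to stay inside the range.
static bool new_check(uint64_t start, uint64_t size)
{
    if (size > UINT64_MAX - start) // addition would overflow
        return false;
    uint64_t end = start + size;
    if (ebda.start > start)
        return false;
    if (ebda.start + ebda.length < end)
        return false;
    return true;
}

int main()
{
    // Mapping 0x1f000 bytes starting at 0xf0000 runs past 0xfffff, yet the old
    // check accepts it because 0x1f000 is less than the range's total length.
    std::printf("old: %d, new: %d\n", old_check(0xf0000, 0x1f000), new_check(0xf0000, 0x1f000));
    return 0;
}

The sketch prints "old: 1, new: 0": the old restriction lets the out-of-range mapping through, while the new one rejects it.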
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Memory/MemoryManager.cpp  11
-rw-r--r--  Kernel/PhysicalAddress.h  2
2 files changed, 9 insertions, 4 deletions
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 764fd0b887..377cea04ca 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -180,13 +180,16 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
 bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, VirtualRange const& range) const
 {
     VERIFY(!m_reserved_memory_ranges.is_empty());
+    // Note: Guard against overflow in case someone tries to mmap on the edge of
+    // the RAM
+    if (start_address.offset_addition_would_overflow(range.size()))
+        return false;
+    auto end_address = start_address.offset(range.size());
     for (auto& current_range : m_reserved_memory_ranges) {
-        if (!(current_range.start <= start_address))
+        if (current_range.start > start_address)
             continue;
-        if (!(current_range.start.offset(current_range.length) > start_address))
+        if (current_range.start.offset(current_range.length) < end_address)
             continue;
-        if (current_range.length < range.size())
-            return false;
         return true;
     }
     return false;
diff --git a/Kernel/PhysicalAddress.h b/Kernel/PhysicalAddress.h
index 0a5d0aa4b4..f6d9367e00 100644
--- a/Kernel/PhysicalAddress.h
+++ b/Kernel/PhysicalAddress.h
@@ -6,6 +6,7 @@
 
 #pragma once
 
+#include <AK/Checked.h>
 #include <AK/Format.h>
 #include <AK/Types.h>
 
@@ -30,6 +31,7 @@ public:
     }
 
     [[nodiscard]] PhysicalAddress offset(PhysicalPtr o) const { return PhysicalAddress(m_address + o); }
+    [[nodiscard]] bool offset_addition_would_overflow(PhysicalPtr o) const { return Checked<PhysicalPtr>::addition_would_overflow(m_address, o); }
     [[nodiscard]] PhysicalPtr get() const { return m_address; }
     void set(PhysicalPtr address) { m_address = address; }
     void mask(PhysicalPtr m) { m_address &= m; }
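For reference, the overflow test the new helper performs can be sketched in plain C++ as below. AK::Checked is SerenityOS's own checked-arithmetic wrapper, so this is only a stand-in for the semantics of addition_would_overflow on an unsigned PhysicalPtr, not the AK implementation, and it assumes PhysicalPtr is a 64-bit unsigned integer.

// Plain-C++ analogue of the unsigned overflow check (assumption: 64-bit PhysicalPtr).
#include <cstdint>

using PhysicalPtr = uint64_t;

static bool offset_addition_would_overflow(PhysicalPtr address, PhysicalPtr offset)
{
    // The unsigned sum wraps exactly when the offset is larger than the
    // distance from the address to the maximum representable value.
    return offset > UINT64_MAX - address;
}

With this guard in place, is_allowed_to_mmap_to_userspace can compute start_address.offset(range.size()) knowing the resulting end address did not wrap around zero.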