summary refs log tree commit diff
path: root/Kernel/Memory
diff options
context:
space:
mode:
author: Liav A <liavalb@gmail.com> 2022-08-06 21:05:48 +0300
committer: Idan Horowitz <idan.horowitz@gmail.com> 2022-09-16 14:55:45 +0300
commit: 3ad0e1a1d595a931e21c8f4ccfb15f61379fa647 (patch)
tree: c0bdb1ef1b13716ae881182fe91f711e48f9e513 /Kernel/Memory
parent: c88cc8557f20668f575dc07d197b8189c94e46af (diff)
download: serenity-3ad0e1a1d595a931e21c8f4ccfb15f61379fa647.zip
Kernel: Handle mmap requests on zero-length data file inodes safely
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--Kernel/Memory/PrivateInodeVMObject.cpp13
-rw-r--r--Kernel/Memory/PrivateInodeVMObject.h1
-rw-r--r--Kernel/Memory/SharedInodeVMObject.cpp12
-rw-r--r--Kernel/Memory/SharedInodeVMObject.h1
4 files changed, 25 insertions, 2 deletions
diff --git a/Kernel/Memory/PrivateInodeVMObject.cpp b/Kernel/Memory/PrivateInodeVMObject.cpp
index 314ee5351b..6f769c6df5 100644
--- a/Kernel/Memory/PrivateInodeVMObject.cpp
+++ b/Kernel/Memory/PrivateInodeVMObject.cpp
@@ -11,7 +11,18 @@ namespace Kernel::Memory {
ErrorOr<NonnullLockRefPtr<PrivateInodeVMObject>> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
{
- auto new_physical_pages = TRY(VMObject::try_create_physical_pages(inode.size()));
+ if (inode.size() == 0)
+ return EINVAL;
+ return try_create_with_inode_and_range(inode, 0, inode.size());
+}
+
+ErrorOr<NonnullLockRefPtr<PrivateInodeVMObject>> PrivateInodeVMObject::try_create_with_inode_and_range(Inode& inode, u64 offset, size_t range_size)
+{
+ // Note: To ensure further allocation of a Region with this VMObject will not complain
+ // on "smaller" VMObject than the requested Region, we simply take the max size between both values.
+ auto size = max(inode.size(), (offset + range_size));
+ VERIFY(size > 0);
+ auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) PrivateInodeVMObject(inode, move(new_physical_pages), move(dirty_pages)));
}
diff --git a/Kernel/Memory/PrivateInodeVMObject.h b/Kernel/Memory/PrivateInodeVMObject.h
index c40b6d45c8..9542bf1b78 100644
--- a/Kernel/Memory/PrivateInodeVMObject.h
+++ b/Kernel/Memory/PrivateInodeVMObject.h
@@ -18,6 +18,7 @@ public:
virtual ~PrivateInodeVMObject() override;
static ErrorOr<NonnullLockRefPtr<PrivateInodeVMObject>> try_create_with_inode(Inode&);
+ static ErrorOr<NonnullLockRefPtr<PrivateInodeVMObject>> try_create_with_inode_and_range(Inode&, u64 offset, size_t range_size);
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
private:
diff --git a/Kernel/Memory/SharedInodeVMObject.cpp b/Kernel/Memory/SharedInodeVMObject.cpp
index aa78cd2aac..4cf5475cb4 100644
--- a/Kernel/Memory/SharedInodeVMObject.cpp
+++ b/Kernel/Memory/SharedInodeVMObject.cpp
@@ -12,7 +12,17 @@ namespace Kernel::Memory {
ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
{
- size_t size = inode.size();
+ if (inode.size() == 0)
+ return EINVAL;
+ return try_create_with_inode_and_range(inode, 0, inode.size());
+}
+
+ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode_and_range(Inode& inode, u64 offset, size_t range_size)
+{
+ // Note: To ensure further allocation of a Region with this VMObject will not complain
+ // on "smaller" VMObject than the requested Region, we simply take the max size between both values.
+ auto size = max(inode.size(), (offset + range_size));
+ VERIFY(size > 0);
if (auto shared_vmobject = inode.shared_vmobject())
return shared_vmobject.release_nonnull();
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
diff --git a/Kernel/Memory/SharedInodeVMObject.h b/Kernel/Memory/SharedInodeVMObject.h
index 8a9ee21c46..d1f859864e 100644
--- a/Kernel/Memory/SharedInodeVMObject.h
+++ b/Kernel/Memory/SharedInodeVMObject.h
@@ -16,6 +16,7 @@ class SharedInodeVMObject final : public InodeVMObject {
public:
static ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> try_create_with_inode(Inode&);
+ static ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> try_create_with_inode_and_range(Inode&, u64 offset, size_t range_size);
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
ErrorOr<void> sync(off_t offset_in_pages = 0, size_t pages = -1);