author     Andreas Kling <kling@serenityos.org>    2020-02-16 12:55:56 +0100
committer  Andreas Kling <kling@serenityos.org>    2020-02-16 12:55:56 +0100
commit     31e1af732f077f84c8aa938a9c6a3cce1cd88a1a (patch)
tree       d86553aea6bce7ce2320020dbf5b1a0a23af4e8c /Kernel
parent     02e199a9cb76a983cd5d9ceb1540080f675a8141 (diff)
download   serenity-31e1af732f077f84c8aa938a9c6a3cce1cd88a1a.zip
Kernel+LibC: Allow sys$mmap() callers to specify address alignment
This is exposed via the non-standard serenity_mmap() call in userspace.
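For context, a minimal userspace sketch of how the new alignment argument could be used follows. The serenity_mmap() prototype, the <serenity.h> header, and the "aligned demo" region name are assumptions about the LibC side, which is not part of this Kernel-limited diff.

    // Sketch only: serenity_mmap()'s parameter order and the <serenity.h>
    // header are assumptions about the LibC wrapper; this diff covers the
    // kernel side only.
    #include <serenity.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main()
    {
        // Request 64 KiB of anonymous memory aligned to a 64 KiB boundary.
        // The alignment must itself be page-aligned, per the
        // (alignment & ~PAGE_MASK) check added to sys$mmap() in this patch.
        void* ptr = serenity_mmap(nullptr, 64 * 1024, 64 * 1024,
                                  PROT_READ | PROT_WRITE,
                                  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0,
                                  "aligned demo");
        if (ptr == MAP_FAILED)
            return 1;
        printf("mapped at %p\n", ptr);
        return 0;
    }
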
Diffstat (limited to 'Kernel')
-rw-r--r--   Kernel/Process.cpp            49
-rw-r--r--   Kernel/Process.h               4
-rw-r--r--   Kernel/Syscall.h               1
-rw-r--r--   Kernel/VM/RangeAllocator.cpp  20
-rw-r--r--   Kernel/VM/RangeAllocator.h     2
5 files changed, 53 insertions, 23 deletions
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 5f8b3a7374..f7974a72fa 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -151,12 +151,12 @@ bool Process::in_group(gid_t gid) const
return m_gid == gid || m_extra_gids.contains(gid);
}
-Range Process::allocate_range(VirtualAddress vaddr, size_t size)
+Range Process::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
{
vaddr.mask(PAGE_MASK);
size = PAGE_ROUND_UP(size);
if (vaddr.is_null())
- return page_directory().range_allocator().allocate_anywhere(size);
+ return page_directory().range_allocator().allocate_anywhere(size, alignment);
return page_directory().range_allocator().allocate_specific(vaddr, size);
}
@@ -185,11 +185,9 @@ Region& Process::allocate_split_region(const Region& source_region, const Range&
return region;
}
-Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String& name, int prot, bool commit)
+Region* Process::allocate_region(const Range& range, const String& name, int prot, bool commit)
{
- auto range = allocate_range(vaddr, size);
- if (!range.is_valid())
- return nullptr;
+ ASSERT(range.is_valid());
auto& region = add_region(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
region.map(page_directory());
if (commit)
@@ -197,6 +195,14 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
return &region;
}
+Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String& name, int prot, bool commit)
+{
+ auto range = allocate_range(vaddr, size);
+ if (!range.is_valid())
+ return nullptr;
+ return allocate_region(range, name, prot, commit);
+}
+
Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size, NonnullRefPtr<Inode> inode, const String& name, int prot)
{
auto range = allocate_range(vaddr, size);
@@ -207,9 +213,10 @@ Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size,
return &region;
}
-Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible)
+Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible)
{
- size_t end_in_vmobject = offset_in_vmobject + size;
+ ASSERT(range.is_valid());
+ size_t end_in_vmobject = offset_in_vmobject + range.size();
if (end_in_vmobject < offset_in_vmobject) {
dbgprintf("allocate_region_with_vmobject: Overflow (offset + size)\n");
return nullptr;
@@ -222,9 +229,6 @@ Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size
dbgprintf("allocate_region_with_vmobject: Attempt to allocate a region with an end past the end of its VMObject.\n");
return nullptr;
}
- auto range = allocate_range(vaddr, size);
- if (!range.is_valid())
- return nullptr;
offset_in_vmobject &= PAGE_MASK;
Region* region;
if (user_accessible)
@@ -235,6 +239,15 @@ Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size
return region;
}
+
+Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible)
+{
+ auto range = allocate_range(vaddr, size);
+ if (!range.is_valid())
+ return nullptr;
+ return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot, user_accessible);
+}
+
bool Process::deallocate_region(Region& region)
{
InterruptDisabler disabler;
@@ -364,11 +377,15 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params)
void* addr = (void*)params.addr;
size_t size = params.size;
+ size_t alignment = params.alignment;
int prot = params.prot;
int flags = params.flags;
int fd = params.fd;
int offset = params.offset;
+ if (alignment & ~PAGE_MASK)
+ return (void*)-EINVAL;
+
if (!is_user_range(VirtualAddress(addr), size))
return (void*)-EFAULT;
@@ -407,15 +424,19 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params)
Region* region = nullptr;
+ auto range = allocate_range(VirtualAddress(addr), size, alignment);
+ if (!range.is_valid())
+ return (void*)-ENOMEM;
+
if (map_purgeable) {
auto vmobject = PurgeableVMObject::create_with_size(size);
- region = allocate_region_with_vmobject(VirtualAddress(addr), size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
+ region = allocate_region_with_vmobject(range, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
if (!region && (!map_fixed && addr != 0))
region = allocate_region_with_vmobject({}, size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
} else if (map_anonymous) {
- region = allocate_region(VirtualAddress(addr), size, !name.is_null() ? name : "mmap", prot, false);
+ region = allocate_region(range, !name.is_null() ? name : "mmap", prot, false);
if (!region && (!map_fixed && addr != 0))
- region = allocate_region({}, size, !name.is_null() ? name : "mmap", prot, false);
+ region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, false);
} else {
if (offset < 0)
return (void*)-EINVAL;
diff --git a/Kernel/Process.h b/Kernel/Process.h
index abf02608d7..aa130dd315 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -364,6 +364,8 @@ public:
Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible = true);
Region* allocate_file_backed_region(VirtualAddress, size_t, NonnullRefPtr<Inode>, const String& name, int prot);
Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
+ Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible = true);
+ Region* allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
bool deallocate_region(Region& region);
Region& allocate_split_region(const Region& source_region, const Range&, size_t offset_in_vmobject);
@@ -405,7 +407,7 @@ private:
Process(Thread*& first_thread, const String& name, uid_t, gid_t, pid_t ppid, RingLevel, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
static pid_t allocate_pid();
- Range allocate_range(VirtualAddress, size_t);
+ Range allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
Region& add_region(NonnullOwnPtr<Region>);
diff --git a/Kernel/Syscall.h b/Kernel/Syscall.h
index 714cc4bff9..83af79f1a9 100644
--- a/Kernel/Syscall.h
+++ b/Kernel/Syscall.h
@@ -240,6 +240,7 @@ struct StringListArgument {
struct SC_mmap_params {
uint32_t addr;
uint32_t size;
+ uint32_t alignment;
int32_t prot;
int32_t flags;
int32_t fd;
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index 95d34120e5..4504d810c6 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -94,7 +94,7 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
m_available_ranges.insert(index + 1, move(remaining_parts[1]));
}
-Range RangeAllocator::allocate_anywhere(size_t size)
+Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
#ifdef VM_GUARD_PAGES
// NOTE: We pad VM allocations with a guard page on each side.
@@ -104,26 +104,32 @@ Range RangeAllocator::allocate_anywhere(size_t size)
size_t effective_size = size;
size_t offset_from_effective_base = 0;
#endif
+
for (int i = 0; i < m_available_ranges.size(); ++i) {
auto& available_range = m_available_ranges[i];
- if (available_range.size() < effective_size)
+ // FIXME: This check is probably excluding some valid candidates when using a large alignment.
+ if (available_range.size() < (effective_size + alignment))
continue;
- Range allocated_range(available_range.base().offset(offset_from_effective_base), size);
- if (available_range.size() == effective_size) {
+
+ uintptr_t initial_base = available_range.base().offset(offset_from_effective_base).get();
+ uintptr_t aligned_base = round_up_to_power_of_two(initial_base, alignment);
+
+ Range allocated_range(VirtualAddress(aligned_base), size);
+ if (available_range == allocated_range) {
#ifdef VRA_DEBUG
- dbgprintf("VRA: Allocated perfect-fit anywhere(%u): %x\n", size, allocated_range.base().get());
+ dbgprintf("VRA: Allocated perfect-fit anywhere(%zu, %zu): %x\n", size, alignment, allocated_range.base().get());
#endif
m_available_ranges.remove(i);
return allocated_range;
}
carve_at_index(i, allocated_range);
#ifdef VRA_DEBUG
- dbgprintf("VRA: Allocated anywhere(%u): %x\n", size, allocated_range.base().get());
+ dbgprintf("VRA: Allocated anywhere(%zu, %zu): %x\n", size, alignment, allocated_range.base().get());
dump();
#endif
return allocated_range;
}
- kprintf("VRA: Failed to allocate anywhere: %u\n", size);
+ kprintf("VRA: Failed to allocate anywhere: %zu, %zu\n", size, alignment);
return {};
}
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
index a826242c9e..6db5783e10 100644
--- a/Kernel/VM/RangeAllocator.h
+++ b/Kernel/VM/RangeAllocator.h
@@ -84,7 +84,7 @@ public:
void initialize_with_range(VirtualAddress, size_t);
void initialize_from_parent(const RangeAllocator&);
- Range allocate_anywhere(size_t);
+ Range allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
Range allocate_specific(VirtualAddress, size_t);
void deallocate(Range);