-rw-r--r--  Kernel/Memory/AddressSpace.cpp    15
-rw-r--r--  Kernel/Memory/AddressSpace.h       4
-rw-r--r--  Kernel/Memory/MemoryManager.cpp   10
-rw-r--r--  Kernel/Memory/RegionTree.cpp       6
-rw-r--r--  Kernel/Memory/RegionTree.h         7
-rw-r--r--  Kernel/Syscalls/clock.cpp          2
-rw-r--r--  Kernel/Syscalls/execve.cpp        10
-rw-r--r--  Kernel/Syscalls/mmap.cpp           4
-rw-r--r--  Kernel/Thread.cpp                  2
9 files changed, 33 insertions, 27 deletions
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index 1df5e58e35..c27afa85c5 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -157,7 +157,7 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
return new_region.leak_ptr();
}
-ErrorOr<Region*> AddressSpace::allocate_region(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot, AllocationStrategy strategy)
+ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot, AllocationStrategy strategy)
{
if (!requested_address.is_page_aligned())
return EINVAL;
@@ -168,20 +168,21 @@ ErrorOr<Region*> AddressSpace::allocate_region(VirtualAddress requested_address,
region_name = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
- if (requested_address.is_null())
- TRY(m_region_tree.place_anywhere(*region, size, alignment));
- else
+ if (requested_address.is_null()) {
+ TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+ } else {
TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
+ }
TRY(region->map(page_directory(), ShouldFlushTLB::No));
return region.leak_ptr();
}
ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
- return allocate_region_with_vmobject(requested_range.base(), requested_range.size(), PAGE_SIZE, move(vmobject), offset_in_vmobject, name, prot, shared);
+ return allocate_region_with_vmobject(RandomizeVirtualAddress::Yes, requested_range.base(), requested_range.size(), PAGE_SIZE, move(vmobject), offset_in_vmobject, name, prot, shared);
}
-ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
if (!requested_address.is_page_aligned())
return EINVAL;
@@ -210,7 +211,7 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualAddress requ
SpinlockLocker locker(m_lock);
if (requested_address.is_null())
- TRY(m_region_tree.place_anywhere(*region, size, alignment));
+ TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
else
TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index 9d314a5869..664dbf36f0 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -36,8 +36,8 @@ public:
ErrorOr<void> unmap_mmap_range(VirtualAddress, size_t);
ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
- ErrorOr<Region*> allocate_region_with_vmobject(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
- ErrorOr<Region*> allocate_region(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
+ ErrorOr<Region*> allocate_region_with_vmobject(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+ ErrorOr<Region*> allocate_region(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
void deallocate_region(Region& region);
NonnullOwnPtr<Region> take_region(Region& region);
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index a14c287d18..e6cc5b7d74 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -457,7 +457,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// Allocate a virtual address range for our array
// This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
auto& region = *MUST(Region::create_unbacked()).leak_ptr();
- MUST(m_region_tree.place_anywhere(region, physical_page_array_pages * PAGE_SIZE));
+ MUST(m_region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE));
auto range = region.range();
// Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
@@ -773,7 +773,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
name_kstring = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, size));
+ TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -816,7 +816,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
name_kstring = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, size));
+ TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -829,7 +829,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
if (!name.is_null())
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, size, PAGE_SIZE));
+ TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -843,7 +843,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable));
- TRY(m_region_tree.place_anywhere(*region, size));
+ TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
TRY(region->map(kernel_page_directory()));
return region;
}
diff --git a/Kernel/Memory/RegionTree.cpp b/Kernel/Memory/RegionTree.cpp
index 30f9661821..a32cbf991b 100644
--- a/Kernel/Memory/RegionTree.cpp
+++ b/Kernel/Memory/RegionTree.cpp
@@ -146,14 +146,14 @@ ErrorOr<VirtualRange> RegionTree::try_allocate_randomized(size_t size, size_t al
ErrorOr<NonnullOwnPtr<Region>> RegionTree::allocate_unbacked_anywhere(size_t size, size_t alignment)
{
auto region = TRY(Region::create_unbacked());
- TRY(place_anywhere(*region, size, alignment));
+ TRY(place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment));
return region;
}
-ErrorOr<void> RegionTree::place_anywhere(Region& region, size_t size, size_t alignment)
+ErrorOr<void> RegionTree::place_anywhere(Region& region, RandomizeVirtualAddress randomize_virtual_address, size_t size, size_t alignment)
{
SpinlockLocker locker(m_lock);
- auto range = TRY(try_allocate_anywhere(size, alignment));
+ auto range = TRY(randomize_virtual_address == RandomizeVirtualAddress::Yes ? try_allocate_randomized(size, alignment) : try_allocate_anywhere(size, alignment));
region.m_range = range;
m_regions.insert(region.vaddr().get(), region);
return {};
diff --git a/Kernel/Memory/RegionTree.h b/Kernel/Memory/RegionTree.h
index 81b530aebc..19a719c2d7 100644
--- a/Kernel/Memory/RegionTree.h
+++ b/Kernel/Memory/RegionTree.h
@@ -15,6 +15,11 @@
namespace Kernel::Memory {
+enum class RandomizeVirtualAddress {
+ No,
+ Yes,
+};
+
class RegionTree {
AK_MAKE_NONCOPYABLE(RegionTree);
AK_MAKE_NONMOVABLE(RegionTree);
@@ -34,7 +39,7 @@ public:
ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_anywhere(size_t size, size_t alignment = PAGE_SIZE);
- ErrorOr<void> place_anywhere(Region&, size_t size, size_t alignment = PAGE_SIZE);
+ ErrorOr<void> place_anywhere(Region&, RandomizeVirtualAddress, size_t size, size_t alignment = PAGE_SIZE);
ErrorOr<void> place_specifically(Region&, VirtualRange const&);
ErrorOr<NonnullOwnPtr<Memory::Region>> create_identity_mapped_region(PhysicalAddress, size_t);
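
For readers skimming the patch, the new policy knob boils down to the single dispatch added to RegionTree::place_anywhere() above. The following is a minimal, standalone C++ sketch of that dispatch, not kernel code: the enum selects between a randomized allocator and a deterministic first-fit one. Both allocator bodies here are placeholder stubs, not SerenityOS's real try_allocate_randomized()/try_allocate_anywhere() implementations.

    // Standalone sketch of the RandomizeVirtualAddress dispatch added above.
    // The two allocators are placeholder stubs; only the selection logic
    // mirrors RegionTree::place_anywhere().
    #include <cstdint>
    #include <cstdio>
    #include <random>

    enum class RandomizeVirtualAddress { No, Yes };

    struct VirtualRange {
        uintptr_t base { 0 };
        size_t size { 0 };
    };

    // Stub: pretend the first free, suitably aligned range starts at 0x10000000.
    static VirtualRange try_allocate_anywhere(size_t size, size_t alignment)
    {
        uintptr_t base = (0x10000000u + alignment - 1) & ~(uintptr_t)(alignment - 1);
        return { base, size };
    }

    // Stub: pick an aligned base inside a fixed window, standing in for ASLR.
    static VirtualRange try_allocate_randomized(size_t size, size_t alignment)
    {
        static std::mt19937_64 rng { 0x5e7e };
        uintptr_t base = 0x20000000u + (rng() % 0x1000) * alignment;
        return { base, size };
    }

    static VirtualRange place_anywhere(RandomizeVirtualAddress randomize, size_t size, size_t alignment = 0x1000)
    {
        // The heart of the patch: callers now state explicitly whether the
        // placement should be randomized.
        return randomize == RandomizeVirtualAddress::Yes
            ? try_allocate_randomized(size, alignment)
            : try_allocate_anywhere(size, alignment);
    }

    int main()
    {
        auto fixed = place_anywhere(RandomizeVirtualAddress::No, 0x4000);
        auto randomized = place_anywhere(RandomizeVirtualAddress::Yes, 0x4000);
        std::printf("deterministic base: %#llx\nrandomized base:    %#llx\n",
            (unsigned long long)fixed.base, (unsigned long long)randomized.base);
        return 0;
    }
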
diff --git a/Kernel/Syscalls/clock.cpp b/Kernel/Syscalls/clock.cpp
index 9517ba75a5..55c4f39945 100644
--- a/Kernel/Syscalls/clock.cpp
+++ b/Kernel/Syscalls/clock.cpp
@@ -17,7 +17,7 @@ ErrorOr<FlatPtr> Process::sys$map_time_page()
auto& vmobject = TimeManagement::the().time_page_vmobject();
- auto* region = TRY(address_space().allocate_region_with_vmobject({}, PAGE_SIZE, PAGE_SIZE, vmobject, 0, "Kernel time page"sv, PROT_READ, true));
+ auto* region = TRY(address_space().allocate_region_with_vmobject(Memory::RandomizeVirtualAddress::Yes, {}, PAGE_SIZE, PAGE_SIZE, vmobject, 0, "Kernel time page"sv, PROT_READ, true));
return region->vaddr().get();
}
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 983010c209..fbea22c70d 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -295,7 +295,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
}
auto region_name = TRY(KString::formatted("{} (master-tls)", elf_name));
- master_tls_region = TRY(new_space->allocate_region({}, program_header.size_in_memory(), PAGE_SIZE, region_name->view(), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
+ master_tls_region = TRY(new_space->allocate_region(Memory::RandomizeVirtualAddress::No, {}, program_header.size_in_memory(), PAGE_SIZE, region_name->view(), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
master_tls_size = program_header.size_in_memory();
master_tls_alignment = program_header.alignment();
@@ -323,7 +323,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
auto range_end = VirtualAddress { rounded_range_end };
- auto region = TRY(new_space->allocate_region(range_base, range_end.get() - range_base.get(), PAGE_SIZE, region_name->view(), prot, AllocationStrategy::Reserve));
+ auto region = TRY(new_space->allocate_region(Memory::RandomizeVirtualAddress::No, range_base, range_end.get() - range_base.get(), PAGE_SIZE, region_name->view(), prot, AllocationStrategy::Reserve));
// It's not always the case with PIE executables (and very well shouldn't be) that the
// virtual address in the program header matches the one we end up giving the process.
@@ -358,7 +358,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
auto range_end = VirtualAddress { rounded_range_end };
- auto region = TRY(new_space->allocate_region_with_vmobject(range_base, range_end.get() - range_base.get(), program_header.alignment(), *vmobject, program_header.offset(), elf_name->view(), prot, true));
+ auto region = TRY(new_space->allocate_region_with_vmobject(Memory::RandomizeVirtualAddress::No, range_base, range_end.get() - range_base.get(), program_header.alignment(), *vmobject, program_header.offset(), elf_name->view(), prot, true));
if (should_allow_syscalls == ShouldAllowSyscalls::Yes)
region->set_syscall_region(true);
@@ -392,7 +392,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
return ENOEXEC;
}
- auto* stack_region = TRY(new_space->allocate_region({}, Thread::default_userspace_stack_size, PAGE_SIZE, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
+ auto* stack_region = TRY(new_space->allocate_region(Memory::RandomizeVirtualAddress::No, {}, Thread::default_userspace_stack_size, PAGE_SIZE, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
stack_region->set_stack(true);
return LoadResult {
@@ -469,7 +469,7 @@ ErrorOr<void> Process::do_exec(NonnullRefPtr<OpenFileDescription> main_program_d
bool has_interpreter = interpreter_description;
interpreter_description = nullptr;
- auto* signal_trampoline_region = TRY(load_result.space->allocate_region_with_vmobject({}, PAGE_SIZE, PAGE_SIZE, g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true));
+ auto* signal_trampoline_region = TRY(load_result.space->allocate_region_with_vmobject(Memory::RandomizeVirtualAddress::No, {}, PAGE_SIZE, PAGE_SIZE, g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true));
signal_trampoline_region->set_syscall_region(true);
// (For dynamically linked executable) Allocate an FD for passing the main executable to the dynamic loader.
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index c3ec9cc2e0..42f199d1ef 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -210,7 +210,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(rounded_size, strategy));
}
- region = TRY(address_space().allocate_region_with_vmobject(requested_range.base(), requested_range.size(), alignment, vmobject.release_nonnull(), 0, {}, prot, map_shared));
+ region = TRY(address_space().allocate_region_with_vmobject(map_randomized ? Memory::RandomizeVirtualAddress::Yes : Memory::RandomizeVirtualAddress::No, requested_range.base(), requested_range.size(), alignment, vmobject.release_nonnull(), 0, {}, prot, map_shared));
} else {
if (offset < 0)
return EINVAL;
@@ -507,7 +507,7 @@ ErrorOr<FlatPtr> Process::sys$allocate_tls(Userspace<char const*> initial_data,
if (multiple_threads)
return EINVAL;
- auto* region = TRY(address_space().allocate_region({}, size, PAGE_SIZE, "Master TLS"sv, PROT_READ | PROT_WRITE));
+ auto* region = TRY(address_space().allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, size, PAGE_SIZE, "Master TLS"sv, PROT_READ | PROT_WRITE));
m_master_tls_region = TRY(region->try_make_weak_ptr());
m_master_tls_size = size;
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 100db817e8..b168ea4c6b 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -1419,7 +1419,7 @@ ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
if (!process().m_master_tls_region)
return {};
- auto* region = TRY(process().address_space().allocate_region({}, thread_specific_region_size(), PAGE_SIZE, "Thread-specific", PROT_READ | PROT_WRITE));
+ auto* region = TRY(process().address_space().allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, thread_specific_region_size(), PAGE_SIZE, "Thread-specific", PROT_READ | PROT_WRITE));
m_thread_specific_range = region->range();
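
On the caller side, the sys$mmap() hunk above keys the enum off a local map_randomized bool, while every kernel-internal call site in MemoryManager.cpp passes RandomizeVirtualAddress::No. A small standalone sketch of that flag-to-enum mapping follows; it assumes the bool corresponds to a MAP_RANDOMIZED-style mmap flag, and the constant's value below is a placeholder, not Serenity's actual definition.

    // Standalone sketch of the caller-side pattern seen in sys$mmap() above:
    // a per-request flag decides whether placement is randomized.
    // MAP_RANDOMIZED's value here is a placeholder bit chosen for the sketch.
    #include <cstdio>

    enum class RandomizeVirtualAddress { No, Yes };

    constexpr unsigned MAP_RANDOMIZED = 1u << 11; // placeholder value

    static RandomizeVirtualAddress placement_policy(unsigned mmap_flags)
    {
        bool map_randomized = (mmap_flags & MAP_RANDOMIZED) != 0;
        return map_randomized ? RandomizeVirtualAddress::Yes : RandomizeVirtualAddress::No;
    }

    int main()
    {
        std::printf("with flag:    %s\n", placement_policy(MAP_RANDOMIZED) == RandomizeVirtualAddress::Yes ? "Yes" : "No");
        std::printf("without flag: %s\n", placement_policy(0) == RandomizeVirtualAddress::Yes ? "Yes" : "No");
        return 0;
    }
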