commit 9c549c178a35934425e4e46be353f6e5e4f60b4f
Author:    Andreas Kling <awesomekling@gmail.com>  2019-09-22 15:12:29 +0200
Committer: Andreas Kling <awesomekling@gmail.com>  2019-09-22 15:12:29 +0200
Tree:      c698be533df46acd72a15723990a84c1132382ff (Kernel/VM/RangeAllocator.cpp)
Parent:    bd1e8bf16639eda528df671e64447febbcffd10a
Kernel: Pad virtual address space allocations with guard pages
Put one unused page on each side of VM allocations to make invalid
accesses more likely to generate crashes.
Note that we do not add this guard padding to mmap() calls at a specific
memory address, only to "mmap it anywhere" requests.
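
As a rough illustration of that distinction (not part of the commit; this is user-space code assuming a POSIX-style mmap(), and the fixed address and sizes are made up), the two request styles look like this:

```cpp
// Illustrative sketch; assumes a POSIX-style mmap() and 4 KiB pages.
#include <sys/mman.h>

void example()
{
    // Fixed-address request: the caller dictates the exact address, so the
    // allocator has no freedom to slide the mapping and insert guard pages.
    void* fixed = mmap(reinterpret_cast<void*>(0x20000000), 4096,
                       PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

    // "mmap it anywhere" request: the kernel picks the address via
    // RangeAllocator::allocate_anywhere(), which after this change pads the
    // carved-out range with one unused page on each side.
    void* anywhere = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

    (void)fixed;
    (void)anywhere;
}
```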
Diffstat (limited to 'Kernel/VM/RangeAllocator.cpp')
-rw-r--r--  Kernel/VM/RangeAllocator.cpp | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index f4fb7ab3c1..8330a798be 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -59,12 +59,14 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
 Range RangeAllocator::allocate_anywhere(size_t size)
 {
+    // NOTE: We pad VM allocations with a guard page on each side.
+    size_t padded_size = size + PAGE_SIZE * 2;
     for (int i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
-        if (available_range.size() < size)
+        if (available_range.size() < padded_size)
             continue;
-        Range allocated_range(available_range.base(), size);
-        if (available_range.size() == size) {
+        Range allocated_range(available_range.base().offset(PAGE_SIZE), size);
+        if (available_range.size() == padded_size) {
 #ifdef VRA_DEBUG
             dbgprintf("VRA: Allocated perfect-fit anywhere(%u): %x\n", size, allocated_range.base().get());
 #endif
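
For scale: with 4 KiB pages, a one-page "anywhere" request now consumes padded_size = 4 KiB + 2 × 4 KiB = 12 KiB of the available range, and the returned Range begins one page past the base of the carved-out region. A minimal user-space sketch of what the padding buys (again assuming a POSIX-style mmap(); this program is not from the SerenityOS tree): the page just past the mapping is now guaranteed never to be handed out to a neighbouring allocation, so an off-by-one access faults instead of silently corrupting adjacent memory.

```cpp
// Hypothetical demonstration, not part of the commit.
#include <stdio.h>
#include <sys/mman.h>

int main()
{
    const size_t page_size = 4096; // assumed PAGE_SIZE

    // An "anywhere" request: the kernel chooses the address.
    void* ret = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ret == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    char* p = static_cast<char*>(ret);

    p[page_size - 1] = 'x'; // last valid byte of the mapping: fine

    // One byte past the end lands in the guard page, which the allocator
    // keeps unused, so this access reliably crashes the process.
    p[page_size] = 'x';

    printf("unreachable if the guard page did its job\n");
    return 0;
}
```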