diff options
author | Brian Gianforcaro <bgianf@serenityos.org> | 2021-07-18 09:31:28 -0700 |
---|---|---|
committer | Gunnar Beutner <gunnar@beutner.name> | 2021-07-20 03:21:14 +0200 |
commit | 27e1120dffd98e55f45a6a175dc65b2eab22270d (patch) | |
tree | 5c109f0fb22634f4a3b1d4a03ffe6cea30ee2420 /Kernel/VM/MemoryManager.cpp | |
parent | af543328ea3efb4d2a39f577ae3a6991babff87d (diff) | |
download | serenity-27e1120dffd98e55f45a6a175dc65b2eab22270d.zip |
Kernel: Move syscall precondition validates to MM
Move these to MM to simplify the flow of the syscall handler.
While here, also make sure we hold the process space lock for
the duration of the validation to avoid potential issues where
another thread attempts to modify the process space during the
validation. This will allow us to move the validation out of the
big process lock scope in a future change.
Additionally utilize the new no_lock variants of functions to avoid
unnecessary recursive process space spinlock acquisitions.
Diffstat (limited to 'Kernel/VM/MemoryManager.cpp')
-rw-r--r-- | Kernel/VM/MemoryManager.cpp | 49 |
1 file changed, 47 insertions, 2 deletions
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 30f8a54fc7..46767ac923 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -624,10 +624,55 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
     return nullptr;
 }
 
+Region* MemoryManager::find_user_region_from_vaddr_no_lock(Space& space, VirtualAddress vaddr)
+{
+    VERIFY(space.get_lock().own_lock());
+    return space.find_region_containing({ vaddr, 1 });
+}
+
 Region* MemoryManager::find_user_region_from_vaddr(Space& space, VirtualAddress vaddr)
 {
     ScopedSpinLock lock(space.get_lock());
-    return space.find_region_containing({ vaddr, 1 });
+    return find_user_region_from_vaddr_no_lock(space, vaddr);
+}
+
+void MemoryManager::validate_syscall_preconditions(Space& space, RegisterState& regs)
+{
+    // We take the space lock once here and then use the no_lock variants
+    // to avoid excessive spinlock recursion in this extemely common path.
+    ScopedSpinLock lock(space.get_lock());
+
+    auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
+        lock.unlock();
+        handle_crash(regs, description, signal);
+    };
+
+    {
+        VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
+        if (!MM.validate_user_stack_no_lock(space, userspace_sp)) {
+            dbgln("Invalid stack pointer: {:p}", userspace_sp);
+            unlock_and_handle_crash("Bad stack on syscall entry", SIGSTKFLT);
+        }
+    }
+
+    {
+        VirtualAddress ip = VirtualAddress { regs.ip() };
+        auto* calling_region = MM.find_user_region_from_vaddr_no_lock(space, ip);
+        if (!calling_region) {
+            dbgln("Syscall from {:p} which has no associated region", ip);
+            unlock_and_handle_crash("Syscall from unknown region", SIGSEGV);
+        }
+
+        if (calling_region->is_writable()) {
+            dbgln("Syscall from writable memory at {:p}", ip);
+            unlock_and_handle_crash("Syscall from writable memory", SIGSEGV);
+        }
+
+        if (space.enforces_syscall_regions() && !calling_region->is_syscall_region()) {
+            dbgln("Syscall from non-syscall region");
+            unlock_and_handle_crash("Syscall from non-syscall region", SIGSEGV);
+        }
+    }
+}
+
 Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
@@ -1039,7 +1084,7 @@ bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr)
     if (!is_user_address(vaddr))
         return false;
 
-    auto* region = find_user_region_from_vaddr(space, vaddr);
+    auto* region = find_user_region_from_vaddr_no_lock(space, vaddr);
     return region && region->is_user() && region->is_stack();
 }