author     Andreas Kling <kling@serenityos.org>   2022-08-23 17:58:05 +0200
committer  Andreas Kling <kling@serenityos.org>   2022-08-24 14:57:51 +0200
commit     cf16b2c8e64709d570c5f54a981017d217e95ed0 (patch)
tree       16c9efdaaa579ae51682a51a58949ce02c3d2092 /Kernel/Arch/x86
parent     d6ef18f587d4a7e4f58487c84e0b9eb260f3ec5a (diff)
Kernel: Wrap process address spaces in SpinlockProtected
This forces anyone who wants to look into and/or manipulate an address space to lock it, replacing the previous, more flimsy manual spinlock use.

Note that pointers *into* the address space are not safe to use after you unlock the space. We have many issues like this, and we'll have to track those down as well.
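For context, here is a minimal userspace sketch of the SpinlockProtected pattern this commit adopts. It is not the kernel's actual Kernel/Locking/SpinlockProtected.h: std::mutex stands in for the kernel Spinlock, and AddressSpace/main below are hypothetical. In the kernel the wrapped value is pointer-like, which is why the real call sites use space->. The point is that the protected value is only reachable through with(), which holds the lock for the duration of the callback and forwards its return value.

    #include <mutex>
    #include <utility>

    template<typename T>
    class SpinlockProtected {
    public:
        template<typename... Args>
        explicit SpinlockProtected(Args&&... args)
            : m_value(std::forward<Args>(args)...)
        {
        }

        // Run `callback` with the lock held and forward its return value,
        // so callers can compute a result (e.g. a bool) under the lock.
        template<typename Callback>
        decltype(auto) with(Callback callback)
        {
            std::lock_guard guard(m_lock);
            return callback(m_value);
        }

    private:
        T m_value;
        std::mutex m_lock; // stand-in for the kernel's Spinlock
    };

    // Hypothetical usage mirroring the call sites in this diff:
    struct AddressSpace {
        void dump_regions() { /* ... */ }
    };

    int main()
    {
        SpinlockProtected<AddressSpace> space;
        space.with([](auto& s) { s.dump_regions(); });
        bool ok = space.with([](auto&) { return true; }); // with() can return a value
        return ok ? 0 : 1;
    }

Anything returned by reference or pointer from the callback refers to state that is no longer protected once with() returns, which is exactly the caveat noted above.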
Diffstat (limited to 'Kernel/Arch/x86')
-rw-r--r--   Kernel/Arch/x86/common/CrashHandler.cpp   | 2
-rw-r--r--   Kernel/Arch/x86/common/Interrupts.cpp     | 12
-rw-r--r--   Kernel/Arch/x86/common/Spinlock.cpp       | 1
3 files changed, 11 insertions, 4 deletions
diff --git a/Kernel/Arch/x86/common/CrashHandler.cpp b/Kernel/Arch/x86/common/CrashHandler.cpp
index df0ba7c214..c33276717a 100644
--- a/Kernel/Arch/x86/common/CrashHandler.cpp
+++ b/Kernel/Arch/x86/common/CrashHandler.cpp
@@ -35,7 +35,7 @@ void handle_crash(Kernel::RegisterState const& regs, char const* description, in
dump_registers(regs);
if (crashed_in_kernel) {
- process.address_space().dump_regions();
+ process.address_space().with([&](auto& space) { space->dump_regions(); });
PANIC("Crash in ring 0");
}
diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index e91de65693..604a74da33 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -303,9 +303,15 @@ void page_fault_handler(TrapFrame* trap)
};
VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
- if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process().address_space(), userspace_sp)) {
- dbgln("Invalid stack pointer: {}", userspace_sp);
- return handle_crash(regs, "Bad stack on page fault", SIGSEGV);
+
+ if (!faulted_in_kernel) {
+ bool has_valid_stack_pointer = current_thread->process().address_space().with([&](auto& space) {
+ return MM.validate_user_stack(*space, userspace_sp);
+ });
+ if (!has_valid_stack_pointer) {
+ dbgln("Invalid stack pointer: {}", userspace_sp);
+ return handle_crash(regs, "Bad stack on page fault", SIGSEGV);
+ }
}
PageFault fault { regs.exception_code, VirtualAddress { fault_address } };
diff --git a/Kernel/Arch/x86/common/Spinlock.cpp b/Kernel/Arch/x86/common/Spinlock.cpp
index 4de2eecfc0..5af0b61495 100644
--- a/Kernel/Arch/x86/common/Spinlock.cpp
+++ b/Kernel/Arch/x86/common/Spinlock.cpp
@@ -55,6 +55,7 @@ u32 RecursiveSpinlock::lock()
void RecursiveSpinlock::unlock(u32 prev_flags)
{
+ VERIFY_INTERRUPTS_DISABLED();
VERIFY(m_recursions > 0);
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
if (--m_recursions == 0) {