author     Andreas Kling <kling@serenityos.org>   2022-08-23 17:58:05 +0200
committer  Andreas Kling <kling@serenityos.org>   2022-08-24 14:57:51 +0200
commit     cf16b2c8e64709d570c5f54a981017d217e95ed0
tree       16c9efdaaa579ae51682a51a58949ce02c3d2092 /Kernel/Memory
parent     d6ef18f587d4a7e4f58487c84e0b9eb260f3ec5a
Kernel: Wrap process address spaces in SpinlockProtected
This forces anyone who wants to look into and/or manipulate an address space to lock it, and it replaces the previous, flimsier manual spinlock use.

Note that pointers *into* the address space are not safe to use after you unlock the space. We've got many issues like this, and we'll have to track those down as well.
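For readers unfamiliar with the AK primitive, the pattern this commit adopts looks roughly like the sketch below. The declaration is paraphrased (the real member lives on Process and its exact type may differ), but the with() calls match the hunks that follow:

    // Hypothetical simplified declaration; SpinlockProtected now owns the
    // lock that AddressSpace previously carried itself.
    SpinlockProtected<NonnullOwnPtr<AddressSpace>> m_address_space;

    // All access goes through with(), which holds the spinlock for exactly
    // the lifetime of the callback:
    m_address_space.with([&](auto& space) {
        auto resident = space->amount_resident();
        // `space`, and any Region* found through it, is only valid in here.
    });
    // Lock released: pointers into the space must not be used past this point.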
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--  Kernel/Memory/AddressSpace.h     |  4
-rw-r--r--  Kernel/Memory/MemoryManager.cpp  | 70
-rw-r--r--  Kernel/Memory/MemoryManager.h    |  4
-rw-r--r--  Kernel/Memory/RegionTree.h       |  6
4 files changed, 35 insertions, 49 deletions
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index 23f7c7a76b..e2df65e0b6 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -53,8 +53,6 @@ public:
void remove_all_regions(Badge<Process>);
- RecursiveSpinlock& get_lock() const { return m_lock; }
-
ErrorOr<size_t> amount_clean_inode() const;
size_t amount_dirty_private() const;
size_t amount_virtual() const;
@@ -66,8 +64,6 @@ public:
private:
AddressSpace(NonnullLockRefPtr<PageDirectory>, VirtualRange total_range);
- mutable RecursiveSpinlock m_lock { LockRank::None };
-
LockRefPtr<PageDirectory> m_page_directory;
// NOTE: The total range is also in the RegionTree, but since it never changes,
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 01d73bc959..3ab26a5731 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -646,40 +646,32 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress address)
return MM.m_region_tree.with([&](auto& region_tree) { return region_tree.find_region_containing(address); });
}
-Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
-{
- VERIFY(space.get_lock().is_locked_by_current_processor());
- return space.find_region_containing({ vaddr, 1 });
-}
-
Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
{
- SpinlockLocker lock(space.get_lock());
- return find_user_region_from_vaddr_no_lock(space, vaddr);
+ return space.find_region_containing({ vaddr, 1 });
}
-void MemoryManager::validate_syscall_preconditions(AddressSpace& space, RegisterState const& regs)
+void MemoryManager::validate_syscall_preconditions(Process& process, RegisterState const& regs)
{
- // We take the space lock once here and then use the no_lock variants
- // to avoid excessive spinlock recursion in this extremely common path.
- SpinlockLocker lock(space.get_lock());
-
- auto unlock_and_handle_crash = [&lock, &regs](char const* description, int signal) {
- lock.unlock();
- handle_crash(regs, description, signal);
+ bool should_crash = false;
+ char const* crash_description = nullptr;
+ int crash_signal = 0;
+
+ auto unlock_and_handle_crash = [&](char const* description, int signal) {
+ should_crash = true;
+ crash_description = description;
+ crash_signal = signal;
};
- {
+ process.address_space().with([&](auto& space) -> void {
VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
- if (!MM.validate_user_stack_no_lock(space, userspace_sp)) {
+ if (!MM.validate_user_stack(*space, userspace_sp)) {
dbgln("Invalid stack pointer: {}", userspace_sp);
return unlock_and_handle_crash("Bad stack on syscall entry", SIGSEGV);
}
- }
- {
VirtualAddress ip = VirtualAddress { regs.ip() };
- auto* calling_region = MM.find_user_region_from_vaddr_no_lock(space, ip);
+ auto* calling_region = MM.find_user_region_from_vaddr(*space, ip);
if (!calling_region) {
dbgln("Syscall from {:p} which has no associated region", ip);
return unlock_and_handle_crash("Syscall from unknown region", SIGSEGV);
@@ -690,10 +682,14 @@ void MemoryManager::validate_syscall_preconditions(AddressSpace& space, Register
return unlock_and_handle_crash("Syscall from writable memory", SIGSEGV);
}
- if (space.enforces_syscall_regions() && !calling_region->is_syscall_region()) {
+ if (space->enforces_syscall_regions() && !calling_region->is_syscall_region()) {
dbgln("Syscall from non-syscall region");
return unlock_and_handle_crash("Syscall from non-syscall region", SIGSEGV);
}
+ });
+
+ if (should_crash) {
+ handle_crash(regs, crash_description, crash_signal);
}
}
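The rewrite above uses a defer-out-of-the-lock shape: the closure only records what went wrong, and handle_crash() runs once with() has returned and the spinlock is dropped. handle_crash() does not return (it terminates the offending thread), so invoking it while still holding the address-space lock would leave the lock taken forever. A minimal sketch of the shape, with hypothetical names (CrashInfo, m_state, looks_valid):

    struct CrashInfo {
        char const* description;
        int signal;
    };
    Optional<CrashInfo> deferred_crash;

    m_state.with([&](auto& state) {
        if (!looks_valid(state))
            deferred_crash = CrashInfo { "Bad state on syscall entry", SIGSEGV };
    });

    // The spinlock is released here, so the non-returning crash path is safe.
    if (deferred_crash.has_value())
        handle_crash(regs, deferred_crash->description, deferred_crash->signal);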
@@ -830,12 +826,20 @@ ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_physical_pages(size_t pa
dbgln("MM: Unable to commit {} pages, have only {}", page_count, m_system_memory_info.physical_pages_uncommitted);
Process::for_each([&](Process const& process) {
+ size_t amount_resident = 0;
+ size_t amount_shared = 0;
+ size_t amount_virtual = 0;
+ process.address_space().with([&](auto& space) {
+ amount_resident = space->amount_resident();
+ amount_shared = space->amount_shared();
+ amount_virtual = space->amount_virtual();
+ });
dbgln("{}({}) resident:{}, shared:{}, virtual:{}",
process.name(),
process.pid(),
- process.address_space().amount_resident() / PAGE_SIZE,
- process.address_space().amount_shared() / PAGE_SIZE,
- process.address_space().amount_virtual() / PAGE_SIZE);
+ amount_resident / PAGE_SIZE,
+ amount_shared / PAGE_SIZE,
+ amount_virtual / PAGE_SIZE);
return IterationDecision::Continue;
});
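The same copy-out idiom appears here: the three counters are snapshotted while the lock is held, and dbgln() formats them afterwards, so log output never runs inside the address-space spinlock. Reduced to a single counter:

    size_t resident = 0;
    process.address_space().with([&](auto& space) {
        resident = space->amount_resident(); // copy out under the lock
    });
    dbgln("resident: {}", resident / PAGE_SIZE); // lock already released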
@@ -1007,7 +1011,9 @@ ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguou
void MemoryManager::enter_process_address_space(Process& process)
{
- enter_address_space(process.address_space());
+ process.address_space().with([](auto& space) {
+ enter_address_space(*space);
+ });
}
void MemoryManager::enter_address_space(AddressSpace& space)
@@ -1100,23 +1106,15 @@ void MemoryManager::unquickmap_page()
mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
}
-bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
{
- VERIFY(space.get_lock().is_locked_by_current_processor());
-
if (!is_user_address(vaddr))
return false;
- auto* region = find_user_region_from_vaddr_no_lock(space, vaddr);
+ auto* region = find_user_region_from_vaddr(space, vaddr);
return region && region->is_user() && region->is_stack();
}
-bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
-{
- SpinlockLocker lock(space.get_lock());
- return validate_user_stack_no_lock(space, vaddr);
-}
-
void MemoryManager::unregister_kernel_region(Region& region)
{
VERIFY(region.is_kernel());
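With the lock owned by the SpinlockProtected wrapper, the public/_no_lock pairs collapse into single functions: anyone holding an AddressSpace& is necessarily inside a with() callback, so the VERIFY(...is_locked_by_current_processor()) assertions and the recursive re-locking both disappear. What survives is just the body, as in the hunk above:

    Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
    {
        // Reaching an AddressSpace& at all means the caller went through
        // address_space().with(...), so the space is locked for this call.
        return space.find_region_containing({ vaddr, 1 });
    }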
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 6bf827773c..4b62ed4e45 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -159,7 +159,6 @@ public:
static void enter_process_address_space(Process&);
static void enter_address_space(AddressSpace&);
- bool validate_user_stack_no_lock(AddressSpace&, VirtualAddress) const;
bool validate_user_stack(AddressSpace&, VirtualAddress) const;
enum class ShouldZeroFill {
@@ -222,8 +221,7 @@ public:
}
static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress);
- static Region* find_user_region_from_vaddr_no_lock(AddressSpace&, VirtualAddress);
- static void validate_syscall_preconditions(AddressSpace&, RegisterState const&);
+ static void validate_syscall_preconditions(Process&, RegisterState const&);
void dump_kernel_regions();
diff --git a/Kernel/Memory/RegionTree.h b/Kernel/Memory/RegionTree.h
index aa5226d448..e028bb3130 100644
--- a/Kernel/Memory/RegionTree.h
+++ b/Kernel/Memory/RegionTree.h
@@ -45,9 +45,6 @@ public:
void delete_all_regions_assuming_they_are_unmapped();
- // FIXME: Access the region tree through a SpinlockProtected or similar.
- RecursiveSpinlock& get_lock() const { return m_lock; }
-
bool remove(Region&);
Region* find_region_containing(VirtualAddress);
@@ -58,9 +55,6 @@ private:
ErrorOr<VirtualRange> allocate_range_specific(VirtualAddress base, size_t size);
ErrorOr<VirtualRange> allocate_range_randomized(size_t size, size_t alignment = PAGE_SIZE);
- // FIXME: We need a Region rank, but we don't know where to put it.
- RecursiveSpinlock mutable m_lock { LockRank::None };
-
IntrusiveRedBlackTree<&Region::m_tree_node> m_regions;
VirtualRange const m_total_range;
};
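RegionTree sheds its own RecursiveSpinlock for the same reason: a process's tree is only reachable through its SpinlockProtected address space, and the kernel's tree was already wrapped, as the first MemoryManager.cpp hunk shows:

    // From kernel_region_from_vaddr() above; the wrapper is now the only way in:
    return MM.m_region_tree.with([&](auto& region_tree) {
        return region_tree.find_region_containing(address);
    });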