author     Andreas Kling <kling@serenityos.org>  2022-08-19 20:53:40 +0200
committer  Andreas Kling <kling@serenityos.org>  2022-08-20 17:20:43 +0200
commit     11eee67b8510767d76fb4793e3b62ac1793dd723 (patch)
tree       8ce47a3813ce74bba56c60f62b29bdd6cdf287da /Kernel/Memory/AddressSpace.h
parent     e475263113387404e63cdc3666391934604eb6e7 (diff)
Kernel: Make self-contained locking smart pointers their own classes
Until now, our kernel has reimplemented a number of AK classes to
provide automatic internal locking:

- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable

This patch renames the Kernel classes so that they can coexist with
the original AK classes:

- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable

The goal here is to eventually get rid of the Lock* classes in favor
of using external locking.
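To illustrate the distinction the message describes, here is a minimal sketch
contrasting the two models. The holder class and its members are hypothetical
(not code from this commit); LockRefPtr, RefPtr, Spinlock, SpinlockLocker, and
LockRank are real kernel/AK types of this era.

    #include <AK/RefPtr.h>
    #include <Kernel/Library/LockRefPtr.h>
    #include <Kernel/Locking/Spinlock.h>

    namespace Kernel {

    class VMObject;

    // Hypothetical holder type, for illustration only.
    class ExampleHolder {
    public:
        // Internal locking: LockRefPtr guards its own pointer slot, so
        // concurrent assignment from multiple CPUs is safe without any
        // caller-side synchronization.
        LockRefPtr<VMObject> m_internally_locked;

        // External locking: a plain AK::RefPtr carries no lock of its own,
        // so every access must happen while holding m_lock.
        void set_externally_locked(RefPtr<VMObject> vmobject)
        {
            SpinlockLocker locker(m_lock);
            m_externally_locked = move(vmobject);
        }

    private:
        Spinlock m_lock { LockRank::None };
        RefPtr<VMObject> m_externally_locked;
    };

    }

The external-locking model trades the convenience of self-locking pointers for
fewer hidden lock acquisitions and clearer lock ownership, which is why the
Lock* classes are slated for eventual removal.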
Diffstat (limited to 'Kernel/Memory/AddressSpace.h')
-rw-r--r--  Kernel/Memory/AddressSpace.h  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index 12b2212659..e07cb3988a 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -9,7 +9,7 @@
#include <AK/RedBlackTree.h>
#include <AK/Vector.h>
-#include <AK/WeakPtr.h>
+#include <Kernel/Library/LockWeakPtr.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/Region.h>
@@ -33,8 +33,8 @@ public:
ErrorOr<void> unmap_mmap_range(VirtualAddress, size_t);
- ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
- ErrorOr<Region*> allocate_region_with_vmobject(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+ ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+ ErrorOr<Region*> allocate_region_with_vmobject(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
ErrorOr<Region*> allocate_region(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
void deallocate_region(Region& region);
NonnullOwnPtr<Region> take_region(Region& region);
@@ -65,11 +65,11 @@ public:
auto& region_tree() { return m_region_tree; }
private:
- AddressSpace(NonnullRefPtr<PageDirectory>, VirtualRange total_range);
+ AddressSpace(NonnullLockRefPtr<PageDirectory>, VirtualRange total_range);
mutable RecursiveSpinlock m_lock { LockRank::None };
- RefPtr<PageDirectory> m_page_directory;
+ LockRefPtr<PageDirectory> m_page_directory;
RegionTree m_region_tree;
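If the commit's stated goal is eventually reached, AddressSpace would drop
LockRefPtr in favor of a plain AK::RefPtr guarded by the RecursiveSpinlock
already shown above. A hypothetical accessor under that end state (illustrative
only; not part of this commit) might look like:

    // Sketch of the external-locking end state: the caller-visible lock
    // (m_lock) serializes access to a plain AK::RefPtr member.
    RefPtr<PageDirectory> page_directory() const
    {
        SpinlockLocker locker(m_lock); // m_lock is mutable, so this is valid in a const member
        return m_page_directory;       // assumed here to be a plain AK::RefPtr
    }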