author | Andreas Kling <kling@serenityos.org> | 2022-08-19 20:53:40 +0200
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2022-08-20 17:20:43 +0200
commit | 11eee67b8510767d76fb4793e3b62ac1793dd723 (patch) |
tree | 8ce47a3813ce74bba56c60f62b29bdd6cdf287da /Kernel/Memory/SharedInodeVMObject.cpp |
parent | e475263113387404e63cdc3666391934604eb6e7 (diff) |
download | serenity-11eee67b8510767d76fb4793e3b62ac1793dd723.zip |
Kernel: Make self-contained locking smart pointers their own classes
Until now, our kernel has reimplemented a number of AK classes to
provide automatic internal locking:
- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable
This patch renames the Kernel classes so that they can coexist with
the original AK classes:
- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable
The goal here is to eventually get rid of the Lock* classes in favor of
using external locking.
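To make the distinction concrete, here is a minimal standalone C++ sketch of the two locking styles (standard library only, not SerenityOS kernel code; the class names `InternallyLockedPtr` and `ExternallyLocked` are hypothetical illustrations). The first type takes its own lock on every access, roughly how the self-contained `Lock*` classes behave; the second keeps the pointer unsynchronized and forces the caller to hold a lock for the duration of each use, which is the external-locking direction this commit works toward.

```cpp
// Conceptual sketch only: illustrates internal vs. external locking of a
// shared smart pointer. Names and APIs here are invented for illustration.
#include <iostream>
#include <memory>
#include <mutex>
#include <string>

// Style 1: "self-contained" locking. Every accessor takes an internal lock,
// so callers cannot forget to lock, but each pointer carries its own lock
// and the locking granularity is fixed at one operation at a time.
template<typename T>
class InternallyLockedPtr {
public:
    void assign(std::shared_ptr<T> ptr)
    {
        std::lock_guard guard(m_lock);
        m_ptr = std::move(ptr);
    }
    std::shared_ptr<T> strong_ref() const
    {
        std::lock_guard guard(m_lock);
        return m_ptr;
    }

private:
    mutable std::mutex m_lock;
    std::shared_ptr<T> m_ptr;
};

// Style 2: external locking. The stored value is a plain (unsynchronized)
// smart pointer, and the wrapper only hands it out while the caller's
// callback runs under the lock.
template<typename T>
class ExternallyLocked {
public:
    template<typename Callback>
    auto with(Callback callback)
    {
        std::lock_guard guard(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value {};
};

int main()
{
    InternallyLockedPtr<std::string> internal;
    internal.assign(std::make_shared<std::string>("internal locking"));
    std::cout << *internal.strong_ref() << '\n';

    ExternallyLocked<std::shared_ptr<std::string>> external;
    external.with([](auto& ptr) { ptr = std::make_shared<std::string>("external locking"); });
    external.with([](auto& ptr) { std::cout << *ptr << '\n'; });
    return 0;
}
```

One practical difference this sketch shows: with external locking, a caller can perform several related operations under a single lock acquisition inside `with()`, whereas the internally locked pointer re-locks on every call.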
Diffstat (limited to 'Kernel/Memory/SharedInodeVMObject.cpp')
-rw-r--r-- | Kernel/Memory/SharedInodeVMObject.cpp | 12 |
1 file changed, 6 insertions, 6 deletions
```diff
diff --git a/Kernel/Memory/SharedInodeVMObject.cpp b/Kernel/Memory/SharedInodeVMObject.cpp
index 2f1e3e27cb..74853c8600 100644
--- a/Kernel/Memory/SharedInodeVMObject.cpp
+++ b/Kernel/Memory/SharedInodeVMObject.cpp
@@ -10,31 +10,31 @@
 namespace Kernel::Memory {
 
-ErrorOr<NonnullRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
+ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
 {
     size_t size = inode.size();
     if (auto shared_vmobject = inode.shared_vmobject())
         return shared_vmobject.release_nonnull();
     auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
     auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
-    auto vmobject = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedInodeVMObject(inode, move(new_physical_pages), move(dirty_pages))));
+    auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedInodeVMObject(inode, move(new_physical_pages), move(dirty_pages))));
     TRY(vmobject->inode().set_shared_vmobject(*vmobject));
     return vmobject;
 }
 
-ErrorOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
+ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
 {
     auto new_physical_pages = TRY(this->try_clone_physical_pages());
     auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
-    return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
+    return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }
```