author      Andreas Kling <kling@serenityos.org>    2021-08-22 01:37:17 +0200
committer   Andreas Kling <kling@serenityos.org>    2021-08-22 03:34:10 +0200
commit      55adace359bfda606b445b5177ce5138687d4626 (patch)
tree        429f7e24f71cde34f8f54f10b8ae43b74c514488 /Kernel/Memory
parent      7d5d26b0481221d3ebf420de346cc33b3e003147 (diff)
download    serenity-55adace359bfda606b445b5177ce5138687d4626.zip
Kernel: Rename SpinLock => Spinlock
Diffstat (limited to 'Kernel/Memory')
-rw-r--r--  Kernel/Memory/AddressSpace.cpp            | 30
-rw-r--r--  Kernel/Memory/AddressSpace.h              |  4
-rw-r--r--  Kernel/Memory/AnonymousVMObject.cpp       | 12
-rw-r--r--  Kernel/Memory/AnonymousVMObject.h         |  2
-rw-r--r--  Kernel/Memory/InodeVMObject.cpp           |  2
-rw-r--r--  Kernel/Memory/MemoryManager.cpp           | 66
-rw-r--r--  Kernel/Memory/MemoryManager.h             |  8
-rw-r--r--  Kernel/Memory/PageDirectory.cpp           |  6
-rw-r--r--  Kernel/Memory/PageDirectory.h             |  4
-rw-r--r--  Kernel/Memory/Region.cpp                  | 24
-rw-r--r--  Kernel/Memory/RingBuffer.h                |  4
-rw-r--r--  Kernel/Memory/VMObject.cpp                |  4
-rw-r--r--  Kernel/Memory/VMObject.h                  | 10
-rw-r--r--  Kernel/Memory/VirtualRangeAllocator.cpp   |  8
-rw-r--r--  Kernel/Memory/VirtualRangeAllocator.h     |  4
15 files changed, 94 insertions, 94 deletions
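
The change below is a mechanical rename: every SpinLock, ScopedSpinLock, RecursiveSpinLock, and SpinLockProtected spelling becomes its lowercase-k Spinlock counterpart, with no behavioral difference. For readers unfamiliar with the types being renamed, here is a minimal user-space sketch of the RAII pattern that Spinlock and ScopedSpinlock provide; it is an illustration built on std::atomic_flag, not the SerenityOS implementation.

// Minimal user-space sketch of the Spinlock / ScopedSpinlock pattern;
// an illustration only, not the kernel's implementation.
#include <atomic>

class Spinlock {
public:
    void lock()
    {
        // Spin until the previous holder clears the flag.
        while (m_flag.test_and_set(std::memory_order_acquire)) {
        }
    }
    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

class ScopedSpinlock {
public:
    explicit ScopedSpinlock(Spinlock& lock)
        : m_lock(lock)
    {
        m_lock.lock(); // acquired for the lifetime of this guard
    }
    ~ScopedSpinlock() { m_lock.unlock(); } // released when the guard goes out of scope

private:
    Spinlock& m_lock;
};

Every hunk below simply swaps the old spellings of these guard and lock types for the new ones.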
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index 3dc0d4cd43..83489e6903 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -5,7 +5,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AddressSpace.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/InodeVMObject.h>
@@ -223,7 +223,7 @@ void AddressSpace::deallocate_region(Region& region)
NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
if (m_region_lookup_cache.region.unsafe_ptr() == &region)
m_region_lookup_cache.region = nullptr;
@@ -235,7 +235,7 @@ NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
Region* AddressSpace::find_region_from_range(VirtualRange const& range)
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
return m_region_lookup_cache.region.unsafe_ptr();
@@ -253,7 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
Region* AddressSpace::find_region_containing(VirtualRange const& range)
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
auto candidate = m_regions.find_largest_not_above(range.base().get());
if (!candidate)
return nullptr;
@@ -265,7 +265,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
Vector<Region*> regions = {};
size_t total_size_collected = 0;
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
auto found_region = m_regions.find_largest_not_above(range.base().get());
if (!found_region)
@@ -286,7 +286,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
Region* AddressSpace::add_region(NonnullOwnPtr<Region> region)
{
auto* ptr = region.ptr();
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
auto success = m_regions.try_insert(region->vaddr().get(), move(region));
return success ? ptr : nullptr;
}
@@ -324,7 +324,7 @@ void AddressSpace::dump_regions()
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
for (auto& sorted_region : m_regions) {
auto& region = *sorted_region;
@@ -342,13 +342,13 @@ void AddressSpace::dump_regions()
void AddressSpace::remove_all_regions(Badge<Process>)
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
m_regions.clear();
}
size_t AddressSpace::amount_dirty_private() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
// FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
// The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
// That's probably a situation that needs to be looked at in general.
@@ -362,7 +362,7 @@ size_t AddressSpace::amount_dirty_private() const
size_t AddressSpace::amount_clean_inode() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
HashTable<const InodeVMObject*> vmobjects;
for (auto& region : m_regions) {
if (region->vmobject().is_inode())
@@ -376,7 +376,7 @@ size_t AddressSpace::amount_clean_inode() const
size_t AddressSpace::amount_virtual() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
size_t amount = 0;
for (auto& region : m_regions) {
amount += region->size();
@@ -386,7 +386,7 @@ size_t AddressSpace::amount_virtual() const
size_t AddressSpace::amount_resident() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
// FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0;
for (auto& region : m_regions) {
@@ -397,7 +397,7 @@ size_t AddressSpace::amount_resident() const
size_t AddressSpace::amount_shared() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
// FIXME: This will double count if multiple regions use the same physical page.
// FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
// and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
@@ -411,7 +411,7 @@ size_t AddressSpace::amount_shared() const
size_t AddressSpace::amount_purgeable_volatile() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
size_t amount = 0;
for (auto& region : m_regions) {
if (!region->vmobject().is_anonymous())
@@ -425,7 +425,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
size_t AddressSpace::amount_purgeable_nonvolatile() const
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
size_t amount = 0;
for (auto& region : m_regions) {
if (!region->vmobject().is_anonymous())
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index e7705e266d..1d6f1f1123 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -55,7 +55,7 @@ public:
void remove_all_regions(Badge<Process>);
- RecursiveSpinLock& get_lock() const { return m_lock; }
+ RecursiveSpinlock& get_lock() const { return m_lock; }
size_t amount_clean_inode() const;
size_t amount_dirty_private() const;
@@ -68,7 +68,7 @@ public:
private:
explicit AddressSpace(NonnullRefPtr<PageDirectory>);
- mutable RecursiveSpinLock m_lock;
+ mutable RecursiveSpinlock m_lock;
RefPtr<PageDirectory> m_page_directory;
diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp
index 4d73523554..fb57dc5fcb 100644
--- a/Kernel/Memory/AnonymousVMObject.cpp
+++ b/Kernel/Memory/AnonymousVMObject.cpp
@@ -16,7 +16,7 @@ namespace Kernel::Memory {
KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
{
// We need to acquire our lock so we copy a sane state
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
if (is_purgeable() && is_volatile()) {
// If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
@@ -178,7 +178,7 @@ AnonymousVMObject::~AnonymousVMObject()
size_t AnonymousVMObject::purge()
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
if (!is_purgeable() || !is_volatile())
return 0;
@@ -206,7 +206,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
{
VERIFY(is_purgeable());
- ScopedSpinLock locker(m_lock);
+ ScopedSpinlock locker(m_lock);
was_purged = m_was_purged;
if (m_volatile == is_volatile)
@@ -306,7 +306,7 @@ size_t AnonymousVMObject::cow_pages() const
PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
{
VERIFY_INTERRUPTS_DISABLED();
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
if (is_volatile()) {
// A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash.
@@ -379,13 +379,13 @@ AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages()
NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
{
- ScopedSpinLock locker(m_lock);
+ ScopedSpinlock locker(m_lock);
return m_committed_pages.take_one();
}
void AnonymousVMObject::SharedCommittedCowPages::uncommit_one()
{
- ScopedSpinLock locker(m_lock);
+ ScopedSpinlock locker(m_lock);
m_committed_pages.uncommit_one();
}
diff --git a/Kernel/Memory/AnonymousVMObject.h b/Kernel/Memory/AnonymousVMObject.h
index 04edca4314..4c839f7fc2 100644
--- a/Kernel/Memory/AnonymousVMObject.h
+++ b/Kernel/Memory/AnonymousVMObject.h
@@ -76,7 +76,7 @@ private:
void uncommit_one();
public:
- SpinLock<u8> m_lock;
+ Spinlock<u8> m_lock;
CommittedPhysicalPageSet m_committed_pages;
};
diff --git a/Kernel/Memory/InodeVMObject.cpp b/Kernel/Memory/InodeVMObject.cpp
index 4f6deefb83..6d1a4025e1 100644
--- a/Kernel/Memory/InodeVMObject.cpp
+++ b/Kernel/Memory/InodeVMObject.cpp
@@ -52,7 +52,7 @@ size_t InodeVMObject::amount_dirty() const
int InodeVMObject::release_all_clean_pages()
{
- ScopedSpinLock locker(m_lock);
+ ScopedSpinlock locker(m_lock);
int count = 0;
for (size_t i = 0; i < page_count(); ++i) {
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index ff42562e36..17c208f0fa 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -47,7 +47,7 @@ namespace Kernel::Memory {
// run. If we do, then Singleton would get re-initialized, causing
// the memory manager to be initialized twice!
static MemoryManager* s_the;
-RecursiveSpinLock s_mm_lock;
+RecursiveSpinlock s_mm_lock;
MemoryManager& MemoryManager::the()
{
@@ -63,7 +63,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
{
s_the = this;
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
parse_memory_map();
write_cr3(kernel_page_directory().cr3());
protect_kernel_image();
@@ -88,7 +88,7 @@ UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
{
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+ ScopedSpinlock page_lock(kernel_page_directory().get_lock());
// Disable writing to the kernel text and rodata segments.
for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
- ScopedSpinLock mm_lock(s_mm_lock);
+ ScopedSpinlock page_lock(kernel_page_directory().get_lock());
+ ScopedSpinlock mm_lock(s_mm_lock);
// Disable writing to the .ro_after_init section
for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
void MemoryManager::unmap_text_after_init()
{
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
- ScopedSpinLock mm_lock(s_mm_lock);
+ ScopedSpinlock page_lock(kernel_page_directory().get_lock());
+ ScopedSpinlock mm_lock(s_mm_lock);
auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
@@ -135,8 +135,8 @@ void MemoryManager::unmap_text_after_init()
void MemoryManager::unmap_ksyms_after_init()
{
- ScopedSpinLock mm_lock(s_mm_lock);
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+ ScopedSpinlock mm_lock(s_mm_lock);
+ ScopedSpinlock page_lock(kernel_page_directory().get_lock());
auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
@@ -413,7 +413,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// try to map the entire region into kernel space so we always have it
// We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
// mapped yet so we can't create them
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
// Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
auto page_tables_base = m_physical_pages_region->lower();
@@ -612,7 +612,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
for (auto& region : MM.m_kernel_regions) {
if (region.contains(vaddr))
return &region;
@@ -628,7 +628,7 @@ Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space,
Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
{
- ScopedSpinLock lock(space.get_lock());
+ ScopedSpinlock lock(space.get_lock());
return find_user_region_from_vaddr_no_lock(space, vaddr);
}
@@ -636,7 +636,7 @@ void MemoryManager::validate_syscall_preconditions(AddressSpace& space, Register
{
// We take the space lock once here and then use the no_lock variants
// to avoid excessive spinlock recursion in this extemely common path.
- ScopedSpinLock lock(space.get_lock());
+ ScopedSpinlock lock(space.get_lock());
auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
lock.unlock();
@@ -702,7 +702,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(kernel_page_directory().get_lock());
+ ScopedSpinlock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
@@ -721,7 +721,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
if (maybe_vm_object.is_error())
return {};
- ScopedSpinLock lock(kernel_page_directory().get_lock());
+ ScopedSpinlock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
@@ -734,7 +734,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
if (maybe_vm_object.is_error())
return {};
VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(kernel_page_directory().get_lock());
+ ScopedSpinlock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
@@ -755,7 +755,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(kernel_page_directory().get_lock());
+ ScopedSpinlock lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
@@ -765,7 +765,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
{
VERIFY(page_count > 0);
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
return {};
@@ -778,7 +778,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>
{
VERIFY(page_count > 0);
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
m_system_memory_info.user_physical_pages_uncommitted += page_count;
@@ -787,7 +787,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
// Are we returning a user page?
for (auto& region : m_user_physical_regions) {
@@ -839,7 +839,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
auto page = find_free_user_physical_page(true);
if (should_zero_fill == ShouldZeroFill::Yes) {
auto* ptr = quickmap_page(*page);
@@ -851,7 +851,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
auto page = find_free_user_physical_page(false);
bool purged_pages = false;
@@ -893,7 +893,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
{
VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
size_t count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count);
@@ -911,7 +911,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
auto page = m_super_physical_region->take_free_page();
if (!page) {
@@ -934,7 +934,7 @@ void MemoryManager::enter_space(AddressSpace& space)
{
auto current_thread = Thread::current();
VERIFY(current_thread != nullptr);
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
current_thread->regs().cr3 = space.page_directory().cr3();
write_cr3(space.page_directory().cr3());
@@ -1006,7 +1006,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
VERIFY_INTERRUPTS_DISABLED();
auto& mm_data = get_data();
mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
void MemoryManager::unquickmap_page()
{
VERIFY_INTERRUPTS_DISABLED();
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
auto& mm_data = get_data();
VERIFY(mm_data.m_quickmap_in_use.is_locked());
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
@@ -1049,20 +1049,20 @@ bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddr
bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
{
- ScopedSpinLock lock(space.get_lock());
+ ScopedSpinlock lock(space.get_lock());
return validate_user_stack_no_lock(space, vaddr);
}
void MemoryManager::register_region(Region& region)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
if (region.is_kernel())
m_kernel_regions.append(region);
}
void MemoryManager::unregister_region(Region& region)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
if (region.is_kernel())
m_kernel_regions.remove(region);
}
@@ -1077,7 +1077,7 @@ void MemoryManager::dump_kernel_regions()
#endif
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
for (auto& region : m_kernel_regions) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
region.vaddr().get(),
@@ -1095,8 +1095,8 @@ void MemoryManager::dump_kernel_regions()
void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
{
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock page_lock(kernel_page_directory().get_lock());
+ ScopedSpinlock lock(s_mm_lock);
auto* pte = ensure_pte(kernel_page_directory(), vaddr);
VERIFY(pte);
if (pte->is_writable() == writable)
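
A recurring shape in MemoryManager.cpp above is a public function that takes a lock once (for example find_user_region_from_vaddr or validate_syscall_preconditions) and then delegates to a *_no_lock variant so inner helpers do not re-acquire the same recursive spinlock on a hot path. A user-space sketch of that locked wrapper / *_no_lock worker split follows; std::recursive_mutex stands in for RecursiveSpinlock, and the RegionTable type and its members are hypothetical.

// Sketch of the locked wrapper / *_no_lock worker split; RegionTable is a
// made-up example type, not kernel code.
#include <mutex>
#include <utility>
#include <vector>

class RegionTable {
public:
    int* find(int key)
    {
        std::scoped_lock lock(m_lock); // take the lock exactly once on the public path
        return find_no_lock(key);
    }

private:
    // Callers must already hold m_lock; skipping the re-lock keeps hot paths cheap.
    int* find_no_lock(int key)
    {
        for (auto& entry : m_entries) {
            if (entry.first == key)
                return &entry.second;
        }
        return nullptr;
    }

    std::recursive_mutex m_lock;
    std::vector<std::pair<int, int>> m_entries;
};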
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 5f545ba205..75f1ee2b5b 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -14,7 +14,7 @@
#include <Kernel/Arch/x86/PageFault.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Forward.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRegion.h>
@@ -93,14 +93,14 @@ struct PhysicalMemoryRange {
struct MemoryManagerData {
static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
- SpinLock<u8> m_quickmap_in_use;
+ Spinlock<u8> m_quickmap_in_use;
u32 m_quickmap_prev_flags;
PhysicalAddress m_last_quickmap_pd;
PhysicalAddress m_last_quickmap_pt;
};
-extern RecursiveSpinLock s_mm_lock;
+extern RecursiveSpinlock s_mm_lock;
// This class represents a set of committed physical pages.
// When you ask MemoryManager to commit pages for you, you get one of these in return.
@@ -197,7 +197,7 @@ public:
SystemMemoryInfo get_system_memory_info()
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
return m_system_memory_info;
}
diff --git a/Kernel/Memory/PageDirectory.cpp b/Kernel/Memory/PageDirectory.cpp
index 138e0faa72..703b45d3ae 100644
--- a/Kernel/Memory/PageDirectory.cpp
+++ b/Kernel/Memory/PageDirectory.cpp
@@ -27,7 +27,7 @@ static HashMap<FlatPtr, PageDirectory*>& cr3_map()
RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
return cr3_map().get(cr3).value_or({});
}
@@ -60,7 +60,7 @@ RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(VirtualRangeAlloca
}
// NOTE: Take the MM lock since we need it for quickmap.
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
#if ARCH(X86_64)
directory->m_pml4t = MM.allocate_user_physical_page();
@@ -159,7 +159,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
PageDirectory::~PageDirectory()
{
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock lock(s_mm_lock);
if (m_space)
cr3_map().remove(cr3());
}
diff --git a/Kernel/Memory/PageDirectory.h b/Kernel/Memory/PageDirectory.h
index 7770da0ec2..6d6e8e9036 100644
--- a/Kernel/Memory/PageDirectory.h
+++ b/Kernel/Memory/PageDirectory.h
@@ -44,7 +44,7 @@ public:
void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }
- RecursiveSpinLock& get_lock() { return m_lock; }
+ RecursiveSpinlock& get_lock() { return m_lock; }
private:
PageDirectory();
@@ -61,7 +61,7 @@ private:
RefPtr<PhysicalPage> m_directory_pages[4];
#endif
HashMap<FlatPtr, NonnullRefPtr<PhysicalPage>> m_page_tables;
- RecursiveSpinLock m_lock;
+ RecursiveSpinlock m_lock;
};
}
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 7937dda73f..30f11cea9a 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -43,8 +43,8 @@ Region::~Region()
MM.unregister_region(*this);
if (m_page_directory) {
- ScopedSpinLock page_lock(m_page_directory->get_lock());
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock page_lock(m_page_directory->get_lock());
+ ScopedSpinlock lock(s_mm_lock);
unmap(ShouldDeallocateVirtualRange::Yes);
VERIFY(!m_page_directory);
}
@@ -183,7 +183,7 @@ bool Region::map_individual_page_impl(size_t page_index)
}
// NOTE: We have to take the MM lock for PTE's to stay valid while we use them.
- ScopedSpinLock mm_locker(s_mm_lock);
+ ScopedSpinlock mm_locker(s_mm_lock);
auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
if (!pte)
@@ -208,12 +208,12 @@ bool Region::map_individual_page_impl(size_t page_index)
bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
{
- ScopedSpinLock lock(vmobject().m_lock);
+ ScopedSpinlock lock(vmobject().m_lock);
if (!m_page_directory)
return true; // not an error, region may have not yet mapped it
if (!translate_vmobject_page(page_index))
return true; // not an error, region doesn't map this page
- ScopedSpinLock page_lock(m_page_directory->get_lock());
+ ScopedSpinlock page_lock(m_page_directory->get_lock());
VERIFY(physical_page(page_index));
bool success = map_individual_page_impl(page_index);
if (with_flush)
@@ -236,8 +236,8 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
{
if (!m_page_directory)
return;
- ScopedSpinLock page_lock(m_page_directory->get_lock());
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock page_lock(m_page_directory->get_lock());
+ ScopedSpinlock lock(s_mm_lock);
size_t count = page_count();
for (size_t i = 0; i < count; ++i) {
auto vaddr = vaddr_from_page_index(i);
@@ -259,8 +259,8 @@ void Region::set_page_directory(PageDirectory& page_directory)
bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
{
- ScopedSpinLock page_lock(page_directory.get_lock());
- ScopedSpinLock lock(s_mm_lock);
+ ScopedSpinlock page_lock(page_directory.get_lock());
+ ScopedSpinlock lock(s_mm_lock);
// FIXME: Find a better place for this sanity check(?)
if (is_user() && !is_shared()) {
@@ -338,7 +338,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
auto& page_slot = physical_page_slot(page_index_in_region);
auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
- ScopedSpinLock locker(vmobject().m_lock);
+ ScopedSpinlock locker(vmobject().m_lock);
if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
@@ -401,7 +401,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
{
- ScopedSpinLock locker(inode_vmobject.m_lock);
+ ScopedSpinlock locker(inode_vmobject.m_lock);
if (!vmobject_physical_page_entry.is_null()) {
dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
if (!remap_vmobject_page(page_index_in_vmobject))
@@ -433,7 +433,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
memset(page_buffer + nread, 0, PAGE_SIZE - nread);
}
- ScopedSpinLock locker(inode_vmobject.m_lock);
+ ScopedSpinlock locker(inode_vmobject.m_lock);
if (!vmobject_physical_page_entry.is_null()) {
// Someone else faulted in this page while we were reading from the inode.
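
Region::~Region, Region::unmap, and Region::map above each take two guards back to back, the owning page directory's lock and then the global s_mm_lock, and hold both while page table entries are touched. A user-space sketch of that nested-guard shape, with std::mutex standing in for both kernel spinlocks and the body reduced to a comment:

// Sketch of holding the page directory lock and the MM lock together while
// PTEs are modified; std::mutex stands in for the kernel locks.
#include <mutex>

std::mutex page_directory_lock;
std::mutex mm_lock;

void unmap_region_pages()
{
    std::scoped_lock page_lock(page_directory_lock); // taken first, as in Region::unmap
    std::scoped_lock lock(mm_lock);                  // taken second, held for the PTE walk
    // ... clear each PTE and flush the TLB while both locks are held ...
}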
diff --git a/Kernel/Memory/RingBuffer.h b/Kernel/Memory/RingBuffer.h
index 58e4208a38..50a3afe323 100644
--- a/Kernel/Memory/RingBuffer.h
+++ b/Kernel/Memory/RingBuffer.h
@@ -23,7 +23,7 @@ public:
void reclaim_space(PhysicalAddress chunk_start, size_t chunk_size);
PhysicalAddress start_of_used() const;
- SpinLock<u8>& lock() { return m_lock; }
+ Spinlock<u8>& lock() { return m_lock; }
size_t used_bytes() const { return m_num_used_bytes; }
PhysicalAddress start_of_region() const { return m_region->physical_page(0)->paddr(); }
VirtualAddress vaddr() const { return m_region->vaddr(); }
@@ -31,7 +31,7 @@ public:
private:
OwnPtr<Memory::Region> m_region;
- SpinLock<u8> m_lock;
+ Spinlock<u8> m_lock;
size_t m_start_of_used {};
size_t m_num_used_bytes {};
size_t m_capacity_in_bytes {};
diff --git a/Kernel/Memory/VMObject.cpp b/Kernel/Memory/VMObject.cpp
index 837611a370..7ccb723180 100644
--- a/Kernel/Memory/VMObject.cpp
+++ b/Kernel/Memory/VMObject.cpp
@@ -10,9 +10,9 @@
namespace Kernel::Memory {
-static Singleton<SpinLockProtected<VMObject::AllInstancesList>> s_all_instances;
+static Singleton<SpinlockProtected<VMObject::AllInstancesList>> s_all_instances;
-SpinLockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
+SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
{
return s_all_instances;
}
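
VMObject.cpp keeps its global instance list behind SpinlockProtected, whose renamed spelling appears above; the wrapped value is only meant to be reached while the protecting lock is held. As a rough user-space sketch of that idea (the LockProtected class and its with() callback API below are illustrative assumptions, not the kernel's actual interface):

// Illustrative lock-protected wrapper in the spirit of SpinlockProtected<T>;
// not the kernel's real class or API.
#include <mutex>
#include <utility>

template<typename T>
class LockProtected {
public:
    template<typename... Args>
    explicit LockProtected(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    // The value is only reachable through a callback that runs under the lock.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::scoped_lock lock(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value;
};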
diff --git a/Kernel/Memory/VMObject.h b/Kernel/Memory/VMObject.h
index c6f06b2ce8..60addc27ad 100644
--- a/Kernel/Memory/VMObject.h
+++ b/Kernel/Memory/VMObject.h
@@ -43,13 +43,13 @@ public:
ALWAYS_INLINE void add_region(Region& region)
{
- ScopedSpinLock locker(m_lock);
+ ScopedSpinlock locker(m_lock);
m_regions.append(region);
}
ALWAYS_INLINE void remove_region(Region& region)
{
- ScopedSpinLock locker(m_lock);
+ ScopedSpinlock locker(m_lock);
m_regions.remove(region);
}
@@ -63,7 +63,7 @@ protected:
IntrusiveListNode<VMObject> m_list_node;
FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
- mutable RecursiveSpinLock m_lock;
+ mutable RecursiveSpinlock m_lock;
private:
VMObject& operator=(VMObject const&) = delete;
@@ -74,13 +74,13 @@ private:
public:
using AllInstancesList = IntrusiveList<VMObject, RawPtr<VMObject>, &VMObject::m_list_node>;
- static SpinLockProtected<VMObject::AllInstancesList>& all_instances();
+ static SpinlockProtected<VMObject::AllInstancesList>& all_instances();
};
template<typename Callback>
inline void VMObject::for_each_region(Callback callback)
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
for (auto& region : m_regions) {
callback(region);
}
diff --git a/Kernel/Memory/VirtualRangeAllocator.cpp b/Kernel/Memory/VirtualRangeAllocator.cpp
index 16bccdfb6e..351ec0cd8b 100644
--- a/Kernel/Memory/VirtualRangeAllocator.cpp
+++ b/Kernel/Memory/VirtualRangeAllocator.cpp
@@ -25,7 +25,7 @@ void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t si
void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
{
- ScopedSpinLock lock(parent_allocator.m_lock);
+ ScopedSpinlock lock(parent_allocator.m_lock);
m_total_range = parent_allocator.m_total_range;
m_available_ranges.clear();
for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
@@ -103,7 +103,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
return {};
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
auto& available_range = *it;
@@ -142,7 +142,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
return {};
}
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
auto& available_range = *it;
if (!available_range.contains(base, size))
@@ -159,7 +159,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
void VirtualRangeAllocator::deallocate(VirtualRange const& range)
{
- ScopedSpinLock lock(m_lock);
+ ScopedSpinlock lock(m_lock);
VERIFY(m_total_range.contains(range));
VERIFY(range.size());
VERIFY((range.size() % PAGE_SIZE) == 0);
diff --git a/Kernel/Memory/VirtualRangeAllocator.h b/Kernel/Memory/VirtualRangeAllocator.h
index b64b4c3875..5835e5a543 100644
--- a/Kernel/Memory/VirtualRangeAllocator.h
+++ b/Kernel/Memory/VirtualRangeAllocator.h
@@ -8,7 +8,7 @@
#include <AK/RedBlackTree.h>
#include <AK/Traits.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/VirtualRange.h>
namespace Kernel::Memory {
@@ -35,7 +35,7 @@ private:
RedBlackTree<FlatPtr, VirtualRange> m_available_ranges;
VirtualRange m_total_range;
- mutable SpinLock<u8> m_lock;
+ mutable Spinlock<u8> m_lock;
};
}