summary | refs | log | tree | commit | diff
path: root/Kernel
diff options
context:
space:
mode:
authorTom <tomut@yahoo.com>2020-08-21 21:49:50 -0600
committerAndreas Kling <kling@serenityos.org>2020-08-25 09:48:48 +0200
commit08a569fbe0567eb4d8aebb08d3021c4103684635 (patch)
tree2b3b0a6cb88599a18df9df33c7be4f24d9541ec4 /Kernel
parent3320bb45d1cd5df66e3f29c55f5aacb112308b95 (diff)
downloadserenity-08a569fbe0567eb4d8aebb08d3021c4103684635.zip
Kernel: Make PhysicalPage not movable and use atomic ref counting
We should not be moving ref-counted objects.
Diffstat (limited to 'Kernel')
-rw-r--r--Kernel/VM/MemoryManager.cpp8
-rw-r--r--Kernel/VM/MemoryManager.h4
-rw-r--r--Kernel/VM/PhysicalPage.cpp10
-rw-r--r--Kernel/VM/PhysicalPage.h18
-rw-r--r--Kernel/VM/PhysicalRegion.h4
5 files changed, 20 insertions, 24 deletions
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 8f53a7f18a..53716a53e9 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -378,7 +378,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
}
-void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
+void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
{
ScopedSpinLock lock(s_mm_lock);
for (auto& region : m_user_physical_regions) {
@@ -387,7 +387,7 @@ void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
continue;
}
- region.return_page(move(page));
+ region.return_page(page);
--m_user_physical_pages_used;
return;
@@ -448,7 +448,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
return page;
}
-void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
+void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page)
{
ScopedSpinLock lock(s_mm_lock);
for (auto& region : m_super_physical_regions) {
@@ -457,7 +457,7 @@ void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
continue;
}
- region.return_page(move(page));
+ region.return_page(page);
--m_super_physical_pages_used;
return;
}
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index a7a324ed0d..f2c3bde451 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -114,8 +114,8 @@ public:
RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
RefPtr<PhysicalPage> allocate_supervisor_physical_page();
NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
- void deallocate_user_physical_page(PhysicalPage&&);
- void deallocate_supervisor_physical_page(PhysicalPage&&);
+ void deallocate_user_physical_page(const PhysicalPage&);
+ void deallocate_supervisor_physical_page(const PhysicalPage&);
OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
diff --git a/Kernel/VM/PhysicalPage.cpp b/Kernel/VM/PhysicalPage.cpp
index 1c4e9ed673..0a7818c7b5 100644
--- a/Kernel/VM/PhysicalPage.cpp
+++ b/Kernel/VM/PhysicalPage.cpp
@@ -42,18 +42,14 @@ PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_retu
{
}
-void PhysicalPage::return_to_freelist() &&
+void PhysicalPage::return_to_freelist() const
{
ASSERT((paddr().get() & ~PAGE_MASK) == 0);
- InterruptDisabler disabler;
-
- m_ref_count = 1;
-
if (m_supervisor)
- MM.deallocate_supervisor_physical_page(move(*this));
+ MM.deallocate_supervisor_physical_page(*this);
else
- MM.deallocate_user_physical_page(move(*this));
+ MM.deallocate_user_physical_page(*this);
#ifdef MM_DEBUG
dbg() << "MM: P" << String::format("%x", m_paddr.get()) << " released to freelist";
diff --git a/Kernel/VM/PhysicalPage.h b/Kernel/VM/PhysicalPage.h
index 498ce7dd03..92dcd20e24 100644
--- a/Kernel/VM/PhysicalPage.h
+++ b/Kernel/VM/PhysicalPage.h
@@ -39,29 +39,29 @@ class PhysicalPage {
friend class PageDirectory;
friend class VMObject;
- MAKE_SLAB_ALLOCATED(PhysicalPage)
+ MAKE_SLAB_ALLOCATED(PhysicalPage);
+ AK_MAKE_NONMOVABLE(PhysicalPage);
+
public:
PhysicalAddress paddr() const { return m_paddr; }
void ref()
{
- ASSERT(m_ref_count);
- ++m_ref_count;
+ m_ref_count.fetch_add(1, AK::memory_order_acq_rel);
}
void unref()
{
- ASSERT(m_ref_count);
- if (!--m_ref_count) {
+ if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1) {
if (m_may_return_to_freelist)
- move(*this).return_to_freelist();
+ return_to_freelist();
delete this;
}
}
static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
- u32 ref_count() const { return m_ref_count; }
+ u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
bool is_shared_zero_page() const;
@@ -69,9 +69,9 @@ private:
PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
~PhysicalPage() {}
- void return_to_freelist() &&;
+ void return_to_freelist() const;
- u32 m_ref_count { 1 };
+ Atomic<u32> m_ref_count { 1 };
bool m_may_return_to_freelist { true };
bool m_supervisor { false };
PhysicalAddress m_paddr;
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
index 0a10d5c9d6..bd43d22fea 100644
--- a/Kernel/VM/PhysicalRegion.h
+++ b/Kernel/VM/PhysicalRegion.h
@@ -49,12 +49,12 @@ public:
unsigned size() const { return m_pages; }
unsigned used() const { return m_used; }
unsigned free() const { return m_pages - m_used; }
- bool contains(PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
+ bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
RefPtr<PhysicalPage> take_free_page(bool supervisor);
NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor);
void return_page_at(PhysicalAddress addr);
- void return_page(PhysicalPage&& page) { return_page_at(page.paddr()); }
+ void return_page(const PhysicalPage& page) { return_page_at(page.paddr()); }
private:
unsigned find_contiguous_free_pages(size_t count);