path: root/Kernel
author     Andreas Kling <kling@serenityos.org>  2021-02-08 22:18:26 +0100
committer  Andreas Kling <kling@serenityos.org>  2021-02-08 22:23:29 +0100
commit     9ca42c4c0e74a8807c4a35a522a1365419e56a1a (patch)
tree       38e6ed4378426095a4b308fa84b42a0b785ff796 /Kernel
parent     8bda30edd28be7b292cd18b69abe6490e66d7d0b (diff)
download   serenity-9ca42c4c0e74a8807c4a35a522a1365419e56a1a.zip
Kernel: Always hold space lock while calculating memory statistics
And put the locker at the top of the functions for clarity.
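
ScopedSpinLock is an RAII guard: constructing it acquires m_lock, and its destructor releases the lock when the guard goes out of scope. Putting the guard in the first statement of each accounting function therefore guarantees the entire walk over m_regions happens under the lock; before this patch, amount_clean_inode() in particular took the lock in an inner block, so its second loop over the collected vmobjects ran after the lock had already been released. A minimal self-contained sketch of the same lock-at-the-top pattern, using std::mutex and std::lock_guard as stand-ins for the kernel's SpinLock and ScopedSpinLock (the Region type below is a placeholder for illustration, not the real Kernel/VM/Region):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    // Placeholder stand-in for a memory region; only what the sketch needs.
    struct Region {
        size_t size { 0 };
    };

    class Space {
    public:
        size_t amount_virtual() const
        {
            // RAII guard at the top of the function: m_lock is held for the
            // whole calculation and released automatically on every return path.
            std::lock_guard<std::mutex> lock(m_lock);
            size_t amount = 0;
            for (auto& region : m_regions)
                amount += region.size;
            return amount;
        }

    private:
        mutable std::mutex m_lock;
        std::vector<Region> m_regions;
    };

Because the guard's lifetime spans the whole function body, there is no window where a partially computed statistic can observe m_regions mid-mutation.
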
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/VM/Space.cpp  22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/Kernel/VM/Space.cpp b/Kernel/VM/Space.cpp
index 16ef065fba..b3bdc90973 100644
--- a/Kernel/VM/Space.cpp
+++ b/Kernel/VM/Space.cpp
@@ -222,11 +222,11 @@ void Space::remove_all_regions(Badge<Process>)
size_t Space::amount_dirty_private() const
{
+ ScopedSpinLock lock(m_lock);
// FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
// The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
// That's probably a situation that needs to be looked at in general.
size_t amount = 0;
- ScopedSpinLock lock(m_lock);
for (auto& region : m_regions) {
if (!region.is_shared())
amount += region.amount_dirty();
@@ -236,13 +236,11 @@ size_t Space::amount_dirty_private() const
size_t Space::amount_clean_inode() const
{
+ ScopedSpinLock lock(m_lock);
HashTable<const InodeVMObject*> vmobjects;
- {
- ScopedSpinLock lock(m_lock);
- for (auto& region : m_regions) {
- if (region.vmobject().is_inode())
- vmobjects.set(&static_cast<const InodeVMObject&>(region.vmobject()));
- }
+ for (auto& region : m_regions) {
+ if (region.vmobject().is_inode())
+ vmobjects.set(&static_cast<const InodeVMObject&>(region.vmobject()));
}
size_t amount = 0;
for (auto& vmobject : vmobjects)
@@ -252,8 +250,8 @@ size_t Space::amount_clean_inode() const
size_t Space::amount_virtual() const
{
- size_t amount = 0;
ScopedSpinLock lock(m_lock);
+ size_t amount = 0;
for (auto& region : m_regions) {
amount += region.size();
}
@@ -262,9 +260,9 @@ size_t Space::amount_virtual() const
size_t Space::amount_resident() const
{
+ ScopedSpinLock lock(m_lock);
// FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0;
- ScopedSpinLock lock(m_lock);
for (auto& region : m_regions) {
amount += region.amount_resident();
}
@@ -273,12 +271,12 @@ size_t Space::amount_resident() const
size_t Space::amount_shared() const
{
+ ScopedSpinLock lock(m_lock);
// FIXME: This will double count if multiple regions use the same physical page.
// FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
// and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
// so that every Region contributes +1 ref to each of its PhysicalPages.
size_t amount = 0;
- ScopedSpinLock lock(m_lock);
for (auto& region : m_regions) {
amount += region.amount_shared();
}
@@ -287,8 +285,8 @@ size_t Space::amount_shared() const
size_t Space::amount_purgeable_volatile() const
{
- size_t amount = 0;
ScopedSpinLock lock(m_lock);
+ size_t amount = 0;
for (auto& region : m_regions) {
if (region.vmobject().is_anonymous() && static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
amount += region.amount_resident();
@@ -298,8 +296,8 @@ size_t Space::amount_purgeable_volatile() const
size_t Space::amount_purgeable_nonvolatile() const
{
- size_t amount = 0;
ScopedSpinLock lock(m_lock);
+ size_t amount = 0;
for (auto& region : m_regions) {
if (region.vmobject().is_anonymous() && !static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
amount += region.amount_resident();