summaryrefslogtreecommitdiff
path: root/Kernel
diff options
context:
space:
mode:
authorIdan Horowitz <idan.horowitz@gmail.com>2021-08-05 20:58:09 +0300
committerAndreas Kling <kling@serenityos.org>2021-08-05 20:26:47 +0200
commit3e909c0c49d687f1e1aa53cf9a8ed7ffecffbf2a (patch)
tree430415ad32e7b194061eaac4f81886199982cb4e /Kernel
parente8d10fb42933f548add3076cb1beaa4dc201d9d4 (diff)
downloadserenity-3e909c0c49d687f1e1aa53cf9a8ed7ffecffbf2a.zip
Kernel: Remove double-counting of allocated pages in AnonymousVMObject
When constructing an AnonymousVMObject with the AllocateNow allocation strategy we accidentally allocated the committed pages directly through MemoryManager instead of taking them from our m_unused_physical_pages CommittedPhysicalPageSet, which meant they were counted as allocated in MemoryManager, but were still counted as unallocated in the PageSet, who would then try to uncommit them on destruction, resulting in a failed assertion. To help prevent similar issues in the future a Badge<T> was added to MM::allocate_committed_user_physical_page to prevent allocation of committed pages except via a CommittedPhysicalPageSet.
Diffstat (limited to 'Kernel')
-rw-r--r--Kernel/VM/AnonymousVMObject.cpp2
-rw-r--r--Kernel/VM/MemoryManager.cpp4
-rw-r--r--Kernel/VM/MemoryManager.h2
3 files changed, 4 insertions, 4 deletions
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 5bd9297d66..84f8e17b26 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -131,7 +131,7 @@ AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, O
if (strategy == AllocationStrategy::AllocateNow) {
// Allocate all pages right now. We know we can get all because we committed the amount needed
for (size_t i = 0; i < page_count(); ++i)
- physical_pages()[i] = MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+ physical_pages()[i] = m_unused_committed_pages->take_one();
} else {
auto& initial_page = (strategy == AllocationStrategy::Reserve) ? MM.lazy_committed_page() : MM.shared_zero_page();
for (size_t i = 0; i < page_count(); ++i)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 2df0469669..5e769fa6eb 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -840,7 +840,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
return page;
}
-NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(ShouldZeroFill should_zero_fill)
+NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
ScopedSpinLock lock(s_mm_lock);
auto page = find_free_user_physical_page(true);
@@ -1134,7 +1134,7 @@ NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
{
VERIFY(m_page_count > 0);
--m_page_count;
- return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+ return MM.allocate_committed_user_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
}
void CommittedPhysicalPageSet::uncommit_one()
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index f548e182ca..20aafea3d4 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -174,7 +174,7 @@ public:
Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
- NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
+ NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
RefPtr<PhysicalPage> allocate_supervisor_physical_page();
NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);