summaryrefslogtreecommitdiff
path: root/Kernel/VM/AnonymousVMObject.cpp
diff options
context:
space:
mode:
author: Andreas Kling <kling@serenityos.org> 2020-02-15 13:12:02 +0100
committer: Andreas Kling <kling@serenityos.org> 2020-02-15 13:17:40 +0100
commitc624d3875ecfdcb77ea2cd69ff40a3a933da33d3 (patch)
treeda36ea358878eea370a847eb857b5dddb273e144 /Kernel/VM/AnonymousVMObject.cpp
parenta4d857e3c5e2040309613256f12c2de348d2b9ba (diff)
downloadserenity-c624d3875ecfdcb77ea2cd69ff40a3a933da33d3.zip
Kernel: Use a shared physical page for zero-filled pages until written
This patch adds a globally shared zero-filled PhysicalPage that will be mapped into every slot of every zero-filled AnonymousVMObject until that page is written to, achieving CoW-like zero-filled pages. Initial testing show that this doesn't actually achieve any sharing yet but it seems like a good design regardless, since it may reduce the number of page faults taken by programs. If you look at the refcount of MM.shared_zero_page() it will have quite a high refcount, but that's just because everything maps it everywhere. If you want to see the "real" refcount, you can build with the MAP_SHARED_ZERO_PAGE_LAZILY flag, and we'll defer mapping of the shared zero page until the first NP read fault. I've left this behavior behind a flag for future testing of this code.
Diffstat (limited to 'Kernel/VM/AnonymousVMObject.cpp')
-rw-r--r-- | Kernel/VM/AnonymousVMObject.cpp | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 60e905f4ec..0cc0711d86 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -25,6 +25,7 @@
*/
#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PhysicalPage.h>
NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
@@ -51,6 +52,10 @@ NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_physical_page(Ph
AnonymousVMObject::AnonymousVMObject(size_t size)
: VMObject(size)
{
+#ifndef MAP_SHARED_ZERO_PAGE_LAZILY
+ for (size_t i = 0; i < page_count(); ++i)
+ physical_pages()[i] = MM.shared_zero_page();
+#endif
}
AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)