author      Andreas Kling <awesomekling@gmail.com>   2019-02-03 01:36:25 +0100
committer   Andreas Kling <awesomekling@gmail.com>   2019-02-03 01:36:25 +0100
commit      abe3f515b16a9bf32dd642a123698f70aab76bbe (patch)
tree        6d9b23f2effd4e915e4c04e6f57c934cfd8893f0 /Kernel/MemoryManager.cpp
parent      7f91aec25cd2b517e8a4bacc2b35e08a0556e9d4 (diff)
download    serenity-abe3f515b16a9bf32dd642a123698f70aab76bbe.zip
Make font loading use mmap().
This exposed a serious race condition in page_in_from_inode(). Reordered the logic and added a paging lock to VMObject, so that only one process can page in from a given VMObject at a time. There are definitely ways to optimize this, for instance by making the locking per-page instead, but that's not something I'm going to worry about right now.
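The heart of the fix is a lock-then-recheck pattern: take the VMObject's paging lock, and if another process already paged this page in while we were waiting, simply map it and return. Here is a condensed sketch of that pattern with the inode read elided; the names come from the hunk further down, and m_paging_lock is presumably the lock member this commit adds to VMObject in another file (not shown here).

// Condensed sketch of the lock-then-recheck pattern; see the full hunk below.
bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
{
    auto& vmo = region.vmo();
    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];

    LOCKER(vmo.m_paging_lock); // only one process may page in from this VMObject at a time

    if (!vmo_page.is_null()) {
        // Someone else paged this in while we were blocked on the lock;
        // all that's left is to map it into this region.
        remap_region_page(region, page_index_in_region, true);
        return true;
    }

    // ...otherwise: read the page from the inode into a buffer, allocate a
    // physical page, map it, and copy the data in (see the full diff below)...
    return true;
}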
Diffstat (limited to 'Kernel/MemoryManager.cpp')
-rw-r--r--   Kernel/MemoryManager.cpp   39
1 file changed, 29 insertions(+), 10 deletions(-)
diff --git a/Kernel/MemoryManager.cpp b/Kernel/MemoryManager.cpp
index c7524e7ab5..a992bbf519 100644
--- a/Kernel/MemoryManager.cpp
+++ b/Kernel/MemoryManager.cpp
@@ -301,30 +301,49 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
     auto& vmo = region.vmo();
     ASSERT(!vmo.is_anonymous());
     ASSERT(vmo.inode());
-    auto& inode = *vmo.inode();
+
     auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
-    ASSERT(vmo_page.is_null());
-    vmo_page = allocate_physical_page(ShouldZeroFill::No);
-    if (vmo_page.is_null()) {
-        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
-        return false;
+
+    bool interrupts_were_enabled = are_interrupts_enabled();
+
+    if (!interrupts_were_enabled)
+        sti();
+
+    LOCKER(vmo.m_paging_lock);
+
+    if (!interrupts_were_enabled)
+        cli();
+
+    if (!vmo_page.is_null()) {
+        kprintf("MM: page_in_from_inode was served by someone else while lock was held\n");
+        remap_region_page(region, page_index_in_region, true);
+        return true;
     }
-    remap_region_page(region, page_index_in_region, true);
-    byte* dest_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
+
 #ifdef MM_DEBUG
     dbgprintf("MM: page_in_from_inode ready to read from inode, will write to L%x!\n", dest_ptr);
 #endif
     sti(); // Oh god here we go...
-    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, dest_ptr, nullptr);
+    byte page_buffer[PAGE_SIZE];
+    auto& inode = *vmo.inode();
+    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
     if (nread < 0) {
         kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
         return false;
     }
     if (nread < PAGE_SIZE) {
         // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
-        memset(dest_ptr + nread, 0, PAGE_SIZE - nread);
+        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
     cli();
+    vmo_page = allocate_physical_page(ShouldZeroFill::No);
+    if (vmo_page.is_null()) {
+        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
+        return false;
+    }
+    remap_region_page(region, page_index_in_region, true);
+    byte* dest_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
+    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
     return true;
 }
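For readability, this is roughly what page_in_from_inode() looks like after the patch, reconstructed from the + and context lines of the hunk above. Indentation is reconstructed, and the MM_DEBUG dbgprintf is trimmed here because that unchanged line still prints dest_ptr, which the patched code no longer declares at that point.

bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
{
    auto& vmo = region.vmo();
    ASSERT(!vmo.is_anonymous());
    ASSERT(vmo.inode());

    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];

    // Make sure interrupts are enabled while (possibly) blocking on the paging
    // lock, then restore the previous interrupt state once we hold it.
    bool interrupts_were_enabled = are_interrupts_enabled();
    if (!interrupts_were_enabled)
        sti();

    LOCKER(vmo.m_paging_lock);

    if (!interrupts_were_enabled)
        cli();

    if (!vmo_page.is_null()) {
        // Another process paged this in while we were waiting for the lock.
        kprintf("MM: page_in_from_inode was served by someone else while lock was held\n");
        remap_region_page(region, page_index_in_region, true);
        return true;
    }

#ifdef MM_DEBUG
    dbgprintf("MM: page_in_from_inode ready to read from inode\n");
#endif
    sti(); // Oh god here we go...
    byte page_buffer[PAGE_SIZE];
    auto& inode = *vmo.inode();
    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
        return false;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }
    cli();
    vmo_page = allocate_physical_page(ShouldZeroFill::No);
    if (vmo_page.is_null()) {
        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
        return false;
    }
    remap_region_page(region, page_index_in_region, true);
    byte* dest_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    return true;
}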