author     Andreas Kling <awesomekling@gmail.com>   2019-08-26 13:47:17 +0200
committer  Andreas Kling <awesomekling@gmail.com>   2019-08-26 13:50:55 +0200
commit     dde10f534f5d6631dd5b43facfc0ccdfc31890d8 (patch)
tree       360e50051e9f0ffcfc7d32c6a34a3f1af93419fb /Kernel
parent     f5d779f47e5d1859571ec0778d78454a610602f9 (diff)
Revert "Kernel: Avoid a memcpy() of the whole block when paging in from inode"
This reverts commit 11896d0e26555b8090540b04b627d43365aaec2e.

This caused a race where other processes using the same InodeVMObject could end up accessing the newly-mapped physical page before we've actually filled it with bytes from disk.

It would be nice to avoid these copies without breaking anything.
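The race is an ordering problem: the reverted change installed and mapped the freshly allocated physical page in the shared InodeVMObject before the inode read had filled it, so another process sharing the VMObject could fault on the same page and observe stale contents. The sketch below is a hypothetical userspace analogue using plain C++ threads, not kernel code; kPageSize and g_shared_page are illustrative stand-ins for the shared page slot, and the paging lock and interrupt handling of the real fault path are deliberately not modeled. It only demonstrates how publish-before-fill lets a concurrent reader see zeroed or partial page contents.

// Hypothetical userspace analogue of the publish-before-fill ordering (not kernel code).
#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <thread>

static constexpr std::size_t kPageSize = 4096;

// Stands in for one physical-page slot of a shared InodeVMObject; nullptr means "not paged in".
static std::atomic<char*> g_shared_page { nullptr };

int main()
{
    // "Pager": allocates and publishes the page, then slowly fills it,
    // mirroring the reverted map-first, read-later ordering.
    std::thread pager([] {
        char* page = new char[kPageSize]();                         // zero-filled, no file data yet
        g_shared_page.store(page);                                   // published too early
        std::this_thread::sleep_for(std::chrono::milliseconds(10));  // stands in for the blocking disk read
        std::memcpy(page, "file contents", sizeof("file contents"));
    });

    // "Other process" sharing the VMObject: sees a non-null page and reads it
    // before the pager has copied the file data in.
    std::thread reader([] {
        char* page = nullptr;
        while (!(page = g_shared_page.load()))
            std::this_thread::yield();
        std::printf("reader saw: \"%s\"\n", page);                   // usually prints an empty string
    });

    pager.join();
    reader.join();
    delete[] g_shared_page.load();
}

In this analogue the reader usually prints an empty string because the zero-filled page became visible before the memcpy(); the kernel race has the same shape, with a disk read in place of the sleep.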
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/VM/MemoryManager.cpp  29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 0498fd4ccd..f0bdebd866 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -342,14 +342,14 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
     ASSERT(vmo.is_inode());
 
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
+
     auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
 
     InterruptFlagSaver saver;
 
-    bool interrupts_were_enabled = are_interrupts_enabled();
-    if (!interrupts_were_enabled)
-        sti();
+    sti();
     LOCKER(vmo.m_paging_lock);
+    cli();
 
     if (!vmo_page.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
@@ -362,24 +362,27 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
 #ifdef MM_DEBUG
     dbgprintf("MM: page_in_from_inode ready to read from inode\n");
 #endif
-    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
-    if (vmo_page.is_null()) {
-        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
-        return false;
-    }
-    remap_region_page(region, page_index_in_region);
-    u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
-
+    sti();
+    u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
-    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, dest_ptr, nullptr);
+    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
     if (nread < 0) {
         kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
         return false;
     }
     if (nread < PAGE_SIZE) {
         // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
-        memset(dest_ptr + nread, 0, PAGE_SIZE - nread);
+        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
+    }
+    cli();
+    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
+    if (vmo_page.is_null()) {
+        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
+        return false;
     }
+    remap_region_page(region, page_index_in_region);
+    u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
+    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
     return true;
 }
 
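For contrast, the restored code path reads the whole page into a stack buffer (page_buffer) first and only then allocates the physical page, maps it, and memcpy()s the data in under the paging lock. Below is the same hypothetical userspace analogue with the fill-before-publish ordering; the names are again illustrative and the kernel's locking and interrupt handling are not modeled.

// Hypothetical userspace analogue of the restored fill-before-publish ordering (not kernel code).
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <thread>

static constexpr std::size_t kPageSize = 4096;
static std::atomic<char*> g_shared_page { nullptr };

int main()
{
    std::thread pager([] {
        // Read into a private buffer first (mirrors page_buffer in the diff)...
        char buffer[kPageSize] = {};
        std::memcpy(buffer, "file contents", sizeof("file contents"));  // stands in for inode.read_bytes()

        // ...then allocate the "physical page", copy the data in, and only
        // afterwards make it visible to other threads.
        char* page = new char[kPageSize];
        std::memcpy(page, buffer, kPageSize);
        g_shared_page.store(page);
    });

    std::thread reader([] {
        char* page = nullptr;
        while (!(page = g_shared_page.load()))
            std::this_thread::yield();
        std::printf("reader saw: \"%s\"\n", page);                      // always sees the full contents
    });

    pager.join();
    reader.join();
    delete[] g_shared_page.load();
}

The cost is the extra PAGE_SIZE copy that the reverted commit tried to remove, which is what "it would be nice to avoid these copies without breaking anything" refers to.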