author    Tom <tomut@yahoo.com>    2021-01-23 09:11:45 -0700
committer Andreas Kling <kling@serenityos.org>    2021-01-27 22:48:41 +0100
commit    250a31045485f231ad7d5b51247688796c5e9d47
tree      b5e7add3e1e8556618811ba7f10a6eeebfb6db1d
parent    ac3927086fe663db0cb04fd0d746a68c96e229b4
Kernel: Release MM lock while yielding from inode page fault handler
We need to make sure other processors can grab the MM lock while we wait, so release it when we might block. Reading the page from disk may also block, so release it during that time as well.
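The pattern this commit applies, dropping a lock before any step that can block and reacquiring it afterwards, can be illustrated with a minimal userspace sketch. All names below are stand-ins (std::recursive_mutex for the kernel's RecursiveSpinLock, read_page_from_disk as a placeholder for the blocking inode read); this is an analogue of the technique, not the real SerenityOS API:

    #include <mutex>

    // Stand-in for the kernel's s_mm_lock (a RecursiveSpinLock there).
    static std::recursive_mutex s_mm_lock;

    static long read_page_from_disk()
    {
        // Placeholder for inode.read_bytes(): may sleep waiting on the disk,
        // which is exactly why the MM lock must not be held across this call.
        return 4096;
    }

    static void handle_inode_fault_sketch(std::unique_lock<std::recursive_mutex>& mm_lock)
    {
        // Drop the lock so other processors can grab it while we block.
        mm_lock.unlock();
        long nread = read_page_from_disk();
        // Reacquire before touching any MM state again.
        mm_lock.lock();
        (void)nread;
    }

    int main()
    {
        std::unique_lock<std::recursive_mutex> mm_lock(s_mm_lock);
        handle_inode_fault_sketch(mm_lock);
    }

Passing the caller's lock guard down by reference, as handle_fault() does with mm_lock below, keeps lock ownership visible in the signature of every function that may temporarily release it.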
Diffstat (limited to 'Kernel/VM/Region.cpp')
-rw-r--r--    Kernel/VM/Region.cpp    24    ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 3c18b1d65b..54f216dc90 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -281,7 +281,8 @@ bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
 {
     bool success = true;
     ASSERT(s_mm_lock.own_lock());
-    ASSERT(m_page_directory);
+    if (!m_page_directory)
+        return success; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page_range(page_index, page_count))
         return success; // not an error, region doesn't map this page range
     ScopedSpinLock page_lock(m_page_directory->get_lock());
@@ -318,7 +319,8 @@ bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
 bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
     ScopedSpinLock lock(s_mm_lock);
-    ASSERT(m_page_directory);
+    if (!m_page_directory)
+        return true; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
     ScopedSpinLock page_lock(m_page_directory->get_lock());
@@ -404,9 +406,8 @@ void Region::remap()
     map(*m_page_directory);
 }
 
-PageFaultResponse Region::handle_fault(const PageFault& fault)
+PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
-    ScopedSpinLock lock(s_mm_lock);
     auto page_index_in_region = page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
         if (fault.is_read() && !is_readable()) {
@@ -419,7 +420,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
     }
     if (vmobject().is_inode()) {
         dbgln<PAGE_FAULT_DEBUG>("NP(inode) fault in Region({})[{}]", this, page_index_in_region);
-        return handle_inode_fault(page_index_in_region);
+        return handle_inode_fault(page_index_in_region, mm_lock);
     }
 
     auto& page_slot = physical_page_slot(page_index_in_region);
@@ -514,13 +515,19 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     return response;
 }
 
-PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
+PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(vmobject().is_inode());
+    mm_lock.unlock();
+    ASSERT(!s_mm_lock.own_lock());
+    ASSERT(!g_scheduler_lock.own_lock());
+
     LOCKER(vmobject().m_paging_lock);
+    mm_lock.lock();
+
     ASSERT_INTERRUPTS_DISABLED();
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
@@ -541,8 +548,13 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
+
+    // Reading the page may block, so release the MM lock temporarily
+    mm_lock.unlock();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
     auto nread = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
+    mm_lock.lock();
+
     if (nread < 0) {
         klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
         return PageFaultResponse::ShouldCrash;
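A consequence of releasing the lock around the blocking read, not visible in this hunk: while the lock is dropped, another processor may fault the same page in first, so code that runs after reacquisition cannot assume the page slot is still empty and has to re-check it. A minimal sketch of that shape, with invented names (page_slot_filled standing in for the region's physical page slot check), assuming the same userspace analogue as above:

    #include <mutex>

    static std::mutex s_mm_lock;          // stand-in for the kernel MM lock
    static bool page_slot_filled = false; // stand-in for the physical page slot

    static void map_page_after_read_sketch(std::unique_lock<std::mutex>& mm_lock)
    {
        mm_lock.unlock();
        // ...blocking disk read into a local buffer happens here, unlocked,
        // during which another processor may page the same page in...
        mm_lock.lock();
        if (page_slot_filled)
            return; // someone else beat us to it; nothing left to do
        // ...otherwise allocate a physical page and copy the buffer in...
        page_slot_filled = true;
    }

    int main()
    {
        std::unique_lock<std::mutex> mm_lock(s_mm_lock);
        map_page_after_read_sketch(mm_lock);
    }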