| Field | Value | Date |
|---|---|---|
| author | Tom <tomut@yahoo.com> | 2021-01-02 12:03:14 -0700 |
| committer | Andreas Kling <kling@serenityos.org> | 2021-01-02 20:56:35 +0100 |
| commit | c630669304b62f8cc8e28548ba9dfba29c82ca33 (patch) | |
| tree | ae478f74f53183a940366f544076bbd3a8b119c3 | |
| parent | e3190bd144256b34c3044da26a5a5ce1c714f706 (diff) | |
| download | serenity-c630669304b62f8cc8e28548ba9dfba29c82ca33.zip | |
Kernel: If a VMObject is shared, broadcast page remappings
If we remap pages (e.g. on lazy allocation) inside a VMObject that is
shared among more than one region, broadcast the remapping to every
other region that may be mapping the same page.
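Conceptually, the change keeps a region count on each VMObject and, when a page has to be remapped, walks every region attached to that VMObject instead of only the faulting one. Below is a minimal, self-contained sketch of that broadcast pattern; the type layout and the `remap_one_page()` helper are hypothetical stand-ins (the actual change uses `Region::do_remap_vmobject_page{,_range}`, `VMObject::for_each_region` and an `Atomic<u32>` region counter, as shown in the diff below), and all locking and index translation are omitted.

```cpp
#include <atomic>
#include <cstddef>
#include <vector>

struct Region;

struct VMObject {
    std::atomic<unsigned> region_count { 0 };
    std::vector<Region*> regions;

    bool is_shared_by_multiple_regions() const { return region_count.load(std::memory_order_relaxed) > 1; }

    // Invoke the callback for every region currently attached to this VMObject.
    template<typename Callback>
    void for_each_region(Callback callback)
    {
        for (auto* region : regions)
            callback(*region);
    }
};

struct Region {
    VMObject* vmobject { nullptr };

    // Hypothetical stand-in for re-applying this region's page table entry for
    // one page of the VMObject (only if the region actually maps that page).
    bool remap_one_page(std::size_t)
    {
        return true;
    }

    // The broadcast pattern: if the VMObject is shared, every region mapping it
    // gets a chance to update its own page tables; otherwise only this one does.
    bool remap_vmobject_page(std::size_t vmobject_page_index)
    {
        bool success = true;
        if (vmobject->is_shared_by_multiple_regions()) {
            vmobject->for_each_region([&](Region& region) {
                if (!region.remap_one_page(vmobject_page_index))
                    success = false;
            });
        } else if (!remap_one_page(vmobject_page_index)) {
            success = false;
        }
        return success;
    }
};

int main()
{
    VMObject vmobject;
    Region r1 { &vmobject };
    Region r2 { &vmobject };
    vmobject.regions = { &r1, &r2 };
    vmobject.region_count = 2;

    // Both regions get a chance to update their mappings for VMObject page 3.
    return r1.remap_vmobject_page(3) ? 0 : 1;
}
```

The `else` branch keeps the common single-region case cheap: the broadcast only happens when the counter says the VMObject is actually shared.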
| Mode | File | Changed lines |
|---|---|---|
| -rw-r--r-- | Kernel/VM/AnonymousVMObject.cpp | 2 |
| -rw-r--r-- | Kernel/VM/Region.cpp | 76 |
| -rw-r--r-- | Kernel/VM/Region.h | 50 |
| -rw-r--r-- | Kernel/VM/VMObject.cpp | 1 |
| -rw-r--r-- | Kernel/VM/VMObject.h | 6 |

5 files changed, 117 insertions(+), 18 deletions(-)
```diff
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 692d71956b..875112e14b 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -215,7 +215,7 @@ int AnonymousVMObject::purge_impl()
             } else {
                 klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " (no ownership) at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
             }
-            region.remap_page_range(range.base, range.count);
+            region.remap_vmobject_page_range(range.base, range.count);
         }
     });
 }
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 62ef18995e..b0da2a4127 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -51,12 +51,14 @@ Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offs
     , m_cacheable(cacheable)
     , m_kernel(kernel)
 {
+    m_vmobject->ref_region();
     register_purgeable_page_ranges();
     MM.register_region(*this);
 }
 
 Region::~Region()
 {
+    m_vmobject->unref_region();
     unregister_purgeable_page_ranges();
 
     // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
@@ -153,7 +155,9 @@ void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
     if (m_vmobject.ptr() == obj.ptr())
         return;
     unregister_purgeable_page_ranges();
+    m_vmobject->unref_region();
     m_vmobject = move(obj);
+    m_vmobject->ref_region();
     register_purgeable_page_ranges();
 }
@@ -299,11 +303,13 @@ bool Region::map_individual_page_impl(size_t page_index)
     return true;
 }
 
-bool Region::remap_page_range(size_t page_index, size_t page_count)
+bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
 {
     bool success = true;
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     ASSERT(m_page_directory);
+    if (!translate_vmobject_page_range(page_index, page_count))
+        return success; // not an error, region doesn't map this page range
     ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t index = page_index;
     while (index < page_index + page_count) {
@@ -318,10 +324,29 @@ bool Region::remap_page_range(size_t page_index, size_t page_count)
     return success;
 }
 
-bool Region::remap_page(size_t page_index, bool with_flush)
+bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
+{
+    bool success = true;
+    ScopedSpinLock lock(s_mm_lock);
+    auto& vmobject = this->vmobject();
+    if (vmobject.is_shared_by_multiple_regions()) {
+        vmobject.for_each_region([&](auto& region) {
+            if (!region.do_remap_vmobject_page_range(page_index, page_count))
+                success = false;
+        });
+    } else {
+        if (!do_remap_vmobject_page_range(page_index, page_count))
+            success = false;
+    }
+    return success;
+}
+
+bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
+    if (!translate_vmobject_page(page_index))
+        return true; // not an error, region doesn't map this page
     ScopedSpinLock page_lock(m_page_directory->get_lock());
     ASSERT(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
@@ -330,6 +355,23 @@ bool Region::remap_page(size_t page_index, bool with_flush)
     return success;
 }
 
+bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
+{
+    bool success = true;
+    ScopedSpinLock lock(s_mm_lock);
+    auto& vmobject = this->vmobject();
+    if (vmobject.is_shared_by_multiple_regions()) {
+        vmobject.for_each_region([&](auto& region) {
+            if (!region.do_remap_vmobject_page(page_index, with_flush))
+                success = false;
+        });
+    } else {
+        if (!do_remap_vmobject_page(page_index, with_flush))
+            success = false;
+    }
+    return success;
+}
+
 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
@@ -411,14 +453,15 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
         auto& page_slot = physical_page_slot(page_index_in_region);
         if (page_slot->is_lazy_committed_page()) {
-            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
-            remap_page(page_index_in_region);
+            auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
+            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
+            remap_vmobject_page(page_index_in_vmobject);
             return PageFaultResponse::Continue;
         }
 #ifdef MAP_SHARED_ZERO_PAGE_LAZILY
         if (fault.is_read()) {
             page_slot = MM.shared_zero_page();
-            remap_page(page_index_in_region);
+            remap_vmobject_page(translate_to_vmobject_page(page_index_in_region));
             return PageFaultResponse::Continue;
         }
         return handle_zero_fault(page_index_in_region);
@@ -453,12 +496,13 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     LOCKER(vmobject().m_paging_lock);
 
     auto& page_slot = physical_page_slot(page_index_in_region);
+    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
 
     if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "MM: zero_page() but page already present. Fine with me!";
 #endif
-        if (!remap_page(page_index_in_region))
+        if (!remap_vmobject_page(page_index_in_vmobject))
             return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
@@ -468,7 +512,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
         current_thread->did_zero_fault();
 
     if (page_slot->is_lazy_committed_page()) {
-        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
+        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
 #ifdef PAGE_FAULT_DEBUG
         dbg() << " >> ALLOCATED COMMITTED " << page_slot->paddr();
 #endif
@@ -483,7 +527,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 #endif
     }
 
-    if (!remap_page(page_index_in_region)) {
+    if (!remap_vmobject_page(page_index_in_vmobject)) {
         klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
         return PageFaultResponse::OutOfMemory;
     }
@@ -500,8 +544,9 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     if (!vmobject().is_anonymous())
         return PageFaultResponse::ShouldCrash;
 
-    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(first_page_index() + page_index_in_region, vaddr().offset(page_index_in_region * PAGE_SIZE));
-    if (!remap_page(page_index_in_region))
+    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
+    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(page_index_in_vmobject, vaddr().offset(page_index_in_region * PAGE_SIZE));
+    if (!remap_vmobject_page(page_index_in_vmobject))
         return PageFaultResponse::OutOfMemory;
     return response;
 }
@@ -515,7 +560,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     ASSERT_INTERRUPTS_DISABLED();
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
-    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
+    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
+    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
 
 #ifdef PAGE_FAULT_DEBUG
     dbg() << "Inode fault in " << name() << " page index: " << page_index_in_region;
@@ -525,7 +571,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << ("MM: page_in_from_inode() but page already present. Fine with me!");
 #endif
-        if (!remap_page(page_index_in_region))
+        if (!remap_vmobject_page(page_index_in_vmobject))
             return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
@@ -541,7 +587,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
-    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
+    auto nread = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
     if (nread < 0) {
         klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
         return PageFaultResponse::ShouldCrash;
@@ -569,7 +615,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     }
 
     MM.unquickmap_page();
-    remap_page(page_index_in_region);
+    remap_vmobject_page(page_index_in_vmobject);
     return PageFaultResponse::Continue;
 }
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 490de32cbf..786dfc09dc 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -122,6 +122,49 @@ public:
         return vaddr().offset(page_index * PAGE_SIZE);
     }
 
+    bool translate_vmobject_page(size_t& index) const
+    {
+        auto first_index = first_page_index();
+        if (index < first_index) {
+            index = first_index;
+            return false;
+        }
+        index -= first_index;
+        auto total_page_count = this->page_count();
+        if (index >= total_page_count) {
+            index = first_index + total_page_count - 1;
+            return false;
+        }
+        return true;
+    }
+
+    bool translate_vmobject_page_range(size_t& index, size_t& page_count) const
+    {
+        auto first_index = first_page_index();
+        if (index < first_index) {
+            auto delta = first_index - index;
+            index = first_index;
+            if (delta >= page_count) {
+                page_count = 0;
+                return false;
+            }
+            page_count -= delta;
+        }
+        index -= first_index;
+        auto total_page_count = this->page_count();
+        if (index + page_count > total_page_count) {
+            page_count = total_page_count - index;
+            if (page_count == 0)
+                return false;
+        }
+        return true;
+    }
+
+    ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
+    {
+        return first_page_index() + page_index;
+    }
+
     size_t first_page_index() const
     {
         return m_offset_in_vmobject / PAGE_SIZE;
@@ -186,7 +229,7 @@ public:
     void set_inherit_mode(InheritMode inherit_mode) { m_inherit_mode = inherit_mode; }
 
-    bool remap_page_range(size_t page_index, size_t page_count);
+    bool remap_vmobject_page_range(size_t page_index, size_t page_count);
 
     bool is_volatile(VirtualAddress vaddr, size_t size) const;
 
     enum class SetVolatileError {
@@ -199,6 +242,8 @@ public:
     RefPtr<Process> get_owner();
 
 private:
+    bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);
+
     void set_access_bit(Access access, bool b)
     {
         if (b)
@@ -207,7 +252,8 @@ private:
             m_access &= ~access;
     }
 
-    bool remap_page(size_t index, bool with_flush = true);
+    bool do_remap_vmobject_page(size_t index, bool with_flush = true);
+    bool remap_vmobject_page(size_t index, bool with_flush = true);
 
     PageFaultResponse handle_cow_fault(size_t page_index);
     PageFaultResponse handle_inode_fault(size_t page_index);
diff --git a/Kernel/VM/VMObject.cpp b/Kernel/VM/VMObject.cpp
index 9c69b5a270..664ace8b5c 100644
--- a/Kernel/VM/VMObject.cpp
+++ b/Kernel/VM/VMObject.cpp
@@ -46,6 +46,7 @@ VMObject::VMObject(size_t size)
 VMObject::~VMObject()
 {
     MM.unregister_vmobject(*this);
+    ASSERT(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
 }
 
 }
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
index d9f48bd4ca..2a48daa66a 100644
--- a/Kernel/VM/VMObject.h
+++ b/Kernel/VM/VMObject.h
@@ -67,6 +67,10 @@ public:
     VMObject* m_next { nullptr };
     VMObject* m_prev { nullptr };
 
+    ALWAYS_INLINE void ref_region() { m_regions_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed); }
+    ALWAYS_INLINE void unref_region() { m_regions_count.fetch_sub(1, AK::MemoryOrder::memory_order_relaxed); }
+    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) > 1; }
+
 protected:
     explicit VMObject(size_t);
     explicit VMObject(const VMObject&);
@@ -83,6 +87,8 @@ private:
     VMObject& operator=(const VMObject&) = delete;
    VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
+
+    Atomic<u32> m_regions_count { 0 };
 };
 
 }
```
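A note on the new index-translation helpers added to Region.h: translate_to_vmobject_page() maps a region-relative page index to a VMObject-relative one, while translate_vmobject_page() goes the opposite direction in place and reports whether this region maps that VMObject page at all; the broadcast uses the false case to skip regions that only map a different window of the object. The sketch below restates that logic outside the class with hypothetical example values (a region whose window starts at VMObject page 2 and spans 4 pages); it is an illustration, not the kernel code itself.

```cpp
#include <cassert>
#include <cstddef>

// Standalone restatement of Region::translate_vmobject_page(): convert a
// VMObject-relative page index to a region-relative one, returning false
// (and clamping the index) when the region does not map that page.
static bool translate_vmobject_page(size_t& index, size_t first_page_index, size_t page_count)
{
    if (index < first_page_index) {
        index = first_page_index;
        return false;
    }
    index -= first_page_index;
    if (index >= page_count) {
        index = first_page_index + page_count - 1;
        return false;
    }
    return true;
}

int main()
{
    // Hypothetical region: maps VMObject pages 2..5 (window of 4 pages).
    const size_t first_page_index = 2;
    const size_t page_count = 4;

    // Region -> VMObject: region page 1 corresponds to VMObject page 3
    // (this is what translate_to_vmobject_page(1) would return).
    size_t page_index_in_vmobject = first_page_index + 1;
    assert(page_index_in_vmobject == 3);

    // VMObject -> Region: VMObject page 4 lies inside the window, so the
    // index is rewritten to region-relative page 2 and true is returned.
    size_t index = 4;
    assert(translate_vmobject_page(index, first_page_index, page_count) && index == 2);

    // VMObject page 7 lies outside the window, so false is returned and the
    // broadcast simply skips this region for that page.
    index = 7;
    assert(!translate_vmobject_page(index, first_page_index, page_count));
    return 0;
}
```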