author    | Tom <tomut@yahoo.com>                  | 2020-09-01 16:10:54 -0600
committer | Andreas Kling <kling@serenityos.org>   | 2020-09-02 00:35:56 +0200
commit    | bf268a0185d23447cec4e9175eda79449cd52b67 (patch)
tree      | 74eded2baca876cea2b63e301f728550ec9b4ed1 /Kernel/VM
parent    | 1ece93c8055142293d5d9745bc26e27abd5c00c5 (diff)
download  | serenity-bf268a0185d23447cec4e9175eda79449cd52b67.zip
Kernel: Handle committing pages in regions more gracefully
Sometimes the underlying physical page is present, but we may be
unable to allocate the page table needed to map it. Bubble up such
mapping errors so that they can be handled more appropriately.
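
In outline, the patch turns a silent failure along the mapping path into an error the caller can act on: ensure_pte() now returns a pointer (null when no page can be allocated for a page table), Region::map_individual_page_impl() and remap_page() return bool, and the page-fault handlers translate a failed remap into PageFaultResponse::OutOfMemory. The following is a condensed, non-compilable sketch of that flow with function bodies elided; the real implementations are in the diff below.

// Condensed sketch of the error path introduced by this patch (bodies elided).
PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    bool did_purge = false;
    auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
    if (!page_table)
        return nullptr; // no physical page left to back a new page table
    // ... map the new page table and return a pointer to the PTE ...
}

bool Region::map_individual_page_impl(size_t page_index)
{
    auto* pte = MM.ensure_pte(*m_page_directory, vaddr_from_page_index(page_index));
    if (!pte)
        return false; // could not create a PTE for this address
    // ... fill in the PTE ...
    return true;
}

PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
{
    // ...
    if (!remap_page(page_index_in_region))
        return PageFaultResponse::OutOfMemory; // the failure bubbles up to the fault handler
    return PageFaultResponse::Continue;
}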
Diffstat (limited to 'Kernel/VM')
-rw-r--r-- | Kernel/VM/MemoryManager.cpp | 14
-rw-r--r-- | Kernel/VM/MemoryManager.h   |  2
-rw-r--r-- | Kernel/VM/Region.cpp        | 67
-rw-r--r-- | Kernel/VM/Region.h          |  6
4 files changed, 58 insertions, 31 deletions
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 79d37784bc..88bb451435 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -89,18 +89,18 @@ void MemoryManager::protect_kernel_image()
 {
     // Disable writing to the kernel text and rodata segments.
     for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
-        auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
         pte.set_writable(false);
     }
 
     if (Processor::current().has_feature(CPUFeature::NX)) {
         // Disable execution of the kernel data and bss segments, as well as the kernel heap.
         for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
-            auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+            auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
         for (size_t i = FlatPtr(kmalloc_start); i < FlatPtr(kmalloc_end); i += PAGE_SIZE) {
-            auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+            auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
     }
@@ -209,7 +209,7 @@ PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualA
     return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
 }
 
-PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
+PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
@@ -225,6 +225,10 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
 #endif
         bool did_purge = false;
         auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
+        if (!page_table) {
+            dbg() << "MM: Unable to allocate page table to map " << vaddr;
+            return nullptr;
+        }
         if (did_purge) {
             // If any memory had to be purged, ensure_pte may have been called as part
             // of the purging process. So we need to re-map the pd in this case to ensure
@@ -247,7 +251,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
         ASSERT(result == AK::HashSetResult::InsertedNewEntry);
     }
 
-    return quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
+    return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
 }
 
 void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index f30fda1c33..f3f4bbb90c 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -195,7 +195,7 @@ private:
     PageTableEntry* quickmap_pt(PhysicalAddress);
 
     PageTableEntry* pte(const PageDirectory&, VirtualAddress);
-    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
+    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
     void release_pte(PageDirectory&, VirtualAddress, bool);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index c1884b0647..d3b4d02676 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -227,38 +227,46 @@ Bitmap& Region::ensure_cow_map() const
     return *m_cow_map;
 }
 
-void Region::map_individual_page_impl(size_t page_index)
+bool Region::map_individual_page_impl(size_t page_index)
 {
     auto page_vaddr = vaddr_from_page_index(page_index);
-    auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
+    auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
+    if (!pte) {
+#ifdef MM_DEBUG
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << " " << name() << " cannot create PTE for " << page_vaddr;
+#endif
+        return false;
+    }
     auto* page = physical_page(page_index);
     if (!page || (!is_readable() && !is_writable())) {
-        pte.clear();
+        pte->clear();
     } else {
-        pte.set_cache_disabled(!m_cacheable);
-        pte.set_physical_page_base(page->paddr().get());
-        pte.set_present(true);
+        pte->set_cache_disabled(!m_cacheable);
+        pte->set_physical_page_base(page->paddr().get());
+        pte->set_present(true);
         if (should_cow(page_index))
-            pte.set_writable(false);
+            pte->set_writable(false);
         else
-            pte.set_writable(is_writable());
+            pte->set_writable(is_writable());
         if (Processor::current().has_feature(CPUFeature::NX))
-            pte.set_execute_disabled(!is_executable());
-        pte.set_user_allowed(is_user_accessible());
+            pte->set_execute_disabled(!is_executable());
+        pte->set_user_allowed(is_user_accessible());
 #ifdef MM_DEBUG
-        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
 #endif
     }
+    return true;
 }
 
-void Region::remap_page(size_t page_index, bool with_flush)
+bool Region::remap_page(size_t page_index, bool with_flush)
 {
     ASSERT(m_page_directory);
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(physical_page(page_index));
-    map_individual_page_impl(page_index);
+    bool success = map_individual_page_impl(page_index);
     if (with_flush)
         MM.flush_tlb(vaddr_from_page_index(page_index));
+    return success;
 }
 
 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
@@ -291,16 +299,24 @@ void Region::set_page_directory(PageDirectory& page_directory)
     m_page_directory = page_directory;
 }
 
-void Region::map(PageDirectory& page_directory)
+bool Region::map(PageDirectory& page_directory)
 {
     ScopedSpinLock lock(s_mm_lock);
     set_page_directory(page_directory);
 #ifdef MM_DEBUG
     dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
 #endif
-    for (size_t page_index = 0; page_index < page_count(); ++page_index)
-        map_individual_page_impl(page_index);
-    MM.flush_tlb(vaddr(), page_count());
+    size_t page_index = 0;
+    while (page_index < page_count()) {
+        if (!map_individual_page_impl(page_index))
+            break;
+        ++page_index;
+    }
+    if (page_index > 0) {
+        MM.flush_tlb(vaddr(), page_index);
+        return page_index == page_count();
+    }
+    return false;
 }
 
 void Region::remap()
@@ -371,7 +387,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "MM: zero_page() but page already present. Fine with me!";
 #endif
-        remap_page(page_index_in_region);
+        if (!remap_page(page_index_in_region))
+            return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
 
@@ -389,7 +406,10 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     dbg() << " >> ZERO " << page->paddr();
 #endif
     page_slot = move(page);
-    remap_page(page_index_in_region);
+    if (!remap_page(page_index_in_region)) {
+        klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
+        return PageFaultResponse::OutOfMemory;
+    }
 
     return PageFaultResponse::Continue;
 }
@@ -402,7 +422,8 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
         dbg() << " >> It's a COW page but nobody is sharing it anymore. Remap r/w";
 #endif
         set_should_cow(page_index_in_region, false);
-        remap_page(page_index_in_region);
+        if (!remap_page(page_index_in_region))
+            return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
 
@@ -428,7 +449,8 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     page_slot = move(page);
     MM.unquickmap_page();
     set_should_cow(page_index_in_region, false);
-    remap_page(page_index_in_region);
+    if (!remap_page(page_index_in_region))
+        return PageFaultResponse::OutOfMemory;
     return PageFaultResponse::Continue;
 }
 
@@ -452,7 +474,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << ("MM: page_in_from_inode() but page already present. Fine with me!");
 #endif
-        remap_page(page_index_in_region);
+        if (!remap_page(page_index_in_region))
+            return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
 
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 6a40edb3af..5480e186de 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -171,7 +171,7 @@ public:
     void set_executable(bool b) { set_access_bit(Access::Execute, b); }
 
     void set_page_directory(PageDirectory&);
-    void map(PageDirectory&);
+    bool map(PageDirectory&);
     enum class ShouldDeallocateVirtualMemoryRange {
         No,
         Yes,
@@ -201,13 +201,13 @@ private:
     }
 
     bool commit(size_t page_index);
-    void remap_page(size_t index, bool with_flush = true);
+    bool remap_page(size_t index, bool with_flush = true);
 
     PageFaultResponse handle_cow_fault(size_t page_index);
     PageFaultResponse handle_inode_fault(size_t page_index);
     PageFaultResponse handle_zero_fault(size_t page_index);
 
-    void map_individual_page_impl(size_t page_index);
+    bool map_individual_page_impl(size_t page_index);
 
     RefPtr<PageDirectory> m_page_directory;
     Range m_range;
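
Since Region::map() now reports failure, a call site can translate a partially mapped region into an error instead of carrying on. The helper below is a hypothetical illustration only, not code from this commit; its name and cleanup policy are assumptions, though Region::map() and Region::unmap() with ShouldDeallocateVirtualMemoryRange are the interfaces shown in the diff above.

// Hypothetical caller, for illustration only -- not part of this commit.
bool attach_region(Region& region, PageDirectory& page_directory)
{
    if (!region.map(page_directory)) {
        // A page table could not be allocated; undo any partial mapping and
        // let the caller report an out-of-memory error (e.g. ENOMEM) upstream.
        region.unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
        return false;
    }
    return true;
}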