diff options
author | Liav A <liavalb@gmail.com> | 2022-08-07 22:08:52 +0300 |
---|---|---|
committer | Idan Horowitz <idan.horowitz@gmail.com> | 2022-09-16 14:55:45 +0300 |
commit | 0c675192c91999cc2b60cbbea708dbf7b5637897 (patch) | |
tree | 138ba4c7e0cad284a4d158eb69f48f3c45b3633f /Kernel/Memory | |
parent | 3ad0e1a1d595a931e21c8f4ccfb15f61379fa647 (diff) | |
download | serenity-0c675192c91999cc2b60cbbea708dbf7b5637897.zip |
Kernel: Send SIGBUS to threads that access beyond the valid Inode mmapped range
According to Dr. POSIX, we should allow calling mmap on inodes even for
ranges that currently don't map to any actual data. Trying to read from
or write to those ranges should result in SIGBUS being sent to the
thread that performed the violating memory access.
Diffstat (limited to 'Kernel/Memory')
-rw-r--r-- | Kernel/Memory/PageFaultResponse.h | 1 | ||||
-rw-r--r-- | Kernel/Memory/Region.cpp | 11 | ||||
-rw-r--r-- | Kernel/Memory/Region.h | 7 |
3 files changed, 16 insertions, 3 deletions
diff --git a/Kernel/Memory/PageFaultResponse.h b/Kernel/Memory/PageFaultResponse.h index 56297f6268..0a5c473230 100644 --- a/Kernel/Memory/PageFaultResponse.h +++ b/Kernel/Memory/PageFaultResponse.h @@ -10,6 +10,7 @@ namespace Kernel { enum class PageFaultResponse { ShouldCrash, + BusError, OutOfMemory, Continue, }; diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp index a5ecc22fe6..0a794e2e16 100644 --- a/Kernel/Memory/Region.cpp +++ b/Kernel/Memory/Region.cpp @@ -362,7 +362,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault) } if (vmobject().is_inode()) { dbgln_if(PAGE_FAULT_DEBUG, "NP(inode) fault in Region({})[{}]", this, page_index_in_region); - return handle_inode_fault(page_index_in_region); + return handle_inode_fault(page_index_in_region, offset_in_page_from_address(fault.vaddr())); } SpinlockLocker vmobject_locker(vmobject().m_lock); @@ -462,7 +462,7 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region) return response; } -PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region) +PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, size_t offset_in_page_in_region) { VERIFY(vmobject().is_inode()); VERIFY(!g_scheduler_lock.is_locked_by_current_processor()); @@ -475,6 +475,13 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region) { // NOTE: The VMObject lock is required when manipulating the VMObject's physical page slot. 
SpinlockLocker locker(inode_vmobject.m_lock); + if (inode_vmobject.inode().size() == 0) + return PageFaultResponse::BusError; + auto fault_vaddr = vaddr_from_page_index(page_index_in_vmobject).offset(offset_in_page_in_region); + auto inode_last_valid_address = vaddr().offset(inode_vmobject.inode().size()); + if (inode_last_valid_address < fault_vaddr) + return PageFaultResponse::BusError; + if (!vmobject_physical_page_slot.is_null()) { dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping."); if (!remap_vmobject_page(page_index_in_vmobject, *vmobject_physical_page_slot)) diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h index aabceef63d..1ef83b03fc 100644 --- a/Kernel/Memory/Region.h +++ b/Kernel/Memory/Region.h @@ -122,6 +122,11 @@ public: return (vaddr - m_range.base()).get() / PAGE_SIZE; } + [[nodiscard]] unsigned offset_in_page_from_address(VirtualAddress vaddr) const + { + return (vaddr - m_range.base()).get() % PAGE_SIZE; + } + [[nodiscard]] VirtualAddress vaddr_from_page_index(size_t page_index) const { return vaddr().offset(page_index * PAGE_SIZE); @@ -219,7 +224,7 @@ private: } [[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index); - [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index); + [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index, size_t offset_in_page_in_region); [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault); [[nodiscard]] bool map_individual_page_impl(size_t page_index); |