-rw-r--r--  Kernel/Arch/x86/common/Interrupts.cpp  73
-rw-r--r--  Kernel/Memory/PageFaultResponse.h        1
-rw-r--r--  Kernel/Memory/Region.cpp                11
-rw-r--r--  Kernel/Memory/Region.h                   7
4 files changed, 58 insertions(+), 34 deletions(-)
diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp
index d9fb429941..271205aaa1 100644
--- a/Kernel/Arch/x86/common/Interrupts.cpp
+++ b/Kernel/Arch/x86/common/Interrupts.cpp
@@ -317,7 +317,7 @@ void page_fault_handler(TrapFrame* trap)
PageFault fault { regs.exception_code, VirtualAddress { fault_address } };
auto response = MM.handle_page_fault(fault);
- if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory) {
+ if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory || response == PageFaultResponse::BusError) {
if (faulted_in_kernel && handle_safe_access_fault(regs, fault_address)) {
// If this would be a ring0 (kernel) fault and the fault was triggered by
// safe_memcpy, safe_strnlen, or safe_memset then we resume execution at
@@ -325,6 +325,11 @@ void page_fault_handler(TrapFrame* trap)
return;
}
+ if (response == PageFaultResponse::BusError && current_thread->has_signal_handler(SIGBUS)) {
+ current_thread->send_urgent_signal_to_self(SIGBUS);
+ return;
+ }
+
if (response != PageFaultResponse::OutOfMemory && current_thread) {
if (current_thread->has_signal_handler(SIGSEGV)) {
current_thread->send_urgent_signal_to_self(SIGSEGV);
@@ -341,36 +346,40 @@ void page_fault_handler(TrapFrame* trap)
constexpr FlatPtr free_scrub_pattern = explode_byte(FREE_SCRUB_BYTE);
constexpr FlatPtr kmalloc_scrub_pattern = explode_byte(KMALLOC_SCRUB_BYTE);
constexpr FlatPtr kfree_scrub_pattern = explode_byte(KFREE_SCRUB_BYTE);
- if ((fault_address & 0xffff0000) == (malloc_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be uninitialized malloc() memory", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (free_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be recently free()'d memory", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (kmalloc_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
- } else if (fault_address < 4096) {
- dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
- } else if constexpr (SANITIZE_PTRS) {
- constexpr FlatPtr refptr_scrub_pattern = explode_byte(REFPTR_SCRUB_BYTE);
- constexpr FlatPtr nonnullrefptr_scrub_pattern = explode_byte(NONNULLREFPTR_SCRUB_BYTE);
- constexpr FlatPtr ownptr_scrub_pattern = explode_byte(OWNPTR_SCRUB_BYTE);
- constexpr FlatPtr nonnullownptr_scrub_pattern = explode_byte(NONNULLOWNPTR_SCRUB_BYTE);
- constexpr FlatPtr lockrefptr_scrub_pattern = explode_byte(LOCKREFPTR_SCRUB_BYTE);
- constexpr FlatPtr nonnulllockrefptr_scrub_pattern = explode_byte(NONNULLLOCKREFPTR_SCRUB_BYTE);
-
- if ((fault_address & 0xffff0000) == (refptr_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (nonnullrefptr_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (ownptr_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be a recently destroyed OwnPtr", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (nonnullownptr_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be a recently destroyed NonnullOwnPtr", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (lockrefptr_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
- } else if ((fault_address & 0xffff0000) == (nonnulllockrefptr_scrub_pattern & 0xffff0000)) {
- dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
+ if (response == PageFaultResponse::BusError) {
+ dbgln("Note: Address {} is an access to undefined memory range of a Inode-backed VMObject", VirtualAddress(fault_address));
+ } else {
+ if ((fault_address & 0xffff0000) == (malloc_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be uninitialized malloc() memory", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (free_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be recently free()'d memory", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (kmalloc_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
+ } else if (fault_address < 4096) {
+ dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
+ } else if constexpr (SANITIZE_PTRS) {
+ constexpr FlatPtr refptr_scrub_pattern = explode_byte(REFPTR_SCRUB_BYTE);
+ constexpr FlatPtr nonnullrefptr_scrub_pattern = explode_byte(NONNULLREFPTR_SCRUB_BYTE);
+ constexpr FlatPtr ownptr_scrub_pattern = explode_byte(OWNPTR_SCRUB_BYTE);
+ constexpr FlatPtr nonnullownptr_scrub_pattern = explode_byte(NONNULLOWNPTR_SCRUB_BYTE);
+ constexpr FlatPtr lockrefptr_scrub_pattern = explode_byte(LOCKREFPTR_SCRUB_BYTE);
+ constexpr FlatPtr nonnulllockrefptr_scrub_pattern = explode_byte(NONNULLLOCKREFPTR_SCRUB_BYTE);
+
+ if ((fault_address & 0xffff0000) == (refptr_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (nonnullrefptr_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (ownptr_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be a recently destroyed OwnPtr", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (nonnullownptr_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be a recently destroyed NonnullOwnPtr", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (lockrefptr_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
+ } else if ((fault_address & 0xffff0000) == (nonnulllockrefptr_scrub_pattern & 0xffff0000)) {
+ dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
+ }
}
}
@@ -390,6 +399,8 @@ void page_fault_handler(TrapFrame* trap)
}
}
+ if (response == PageFaultResponse::BusError)
+ return handle_crash(regs, "Page Fault (Bus Error)", SIGBUS, false);
return handle_crash(regs, "Page Fault", SIGSEGV, response == PageFaultResponse::OutOfMemory);
} else if (response == PageFaultResponse::Continue) {
dbgln_if(PAGE_FAULT_DEBUG, "Continuing after resolved page fault");
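With this change, a userspace thread that touches an Inode-backed mapping beyond the inode's valid range receives SIGBUS (and gets to run a SIGBUS handler if it installed one) instead of being killed with SIGSEGV. The following hypothetical test program is not part of this patch; it is a sketch of how the behavior could be observed from userspace, with the file path and sizes made up for the example.

// Hypothetical userspace test program, not part of this commit: it maps one
// page of an empty file and reads from it. With this patch the access faults
// with SIGBUS instead of SIGSEGV, and the installed handler runs.
#include <csignal>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void sigbus_handler(int)
{
    // Stay async-signal-safe: write() and _exit() only.
    char const msg[] = "SIGBUS: access beyond the mapped inode's size\n";
    write(STDERR_FILENO, msg, sizeof(msg) - 1);
    _exit(0);
}

int main()
{
    signal(SIGBUS, sigbus_handler);

    // Assumption for the example: /tmp/empty is a zero-length scratch file.
    int fd = open("/tmp/empty", O_RDWR | O_CREAT | O_TRUNC, 0644);
    if (fd < 0)
        return 1;

    // The mapping itself succeeds; the fault only happens on access, which is
    // where Region::handle_inode_fault() now returns PageFaultResponse::BusError.
    void* addr = mmap(nullptr, 4096, PROT_READ, MAP_SHARED, fd, 0);
    if (addr == MAP_FAILED)
        return 1;

    auto* p = static_cast<volatile char*>(addr);
    char c = p[0]; // Past the inode's valid range -> SIGBUS.
    (void)c;
    return 0;
}
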
diff --git a/Kernel/Memory/PageFaultResponse.h b/Kernel/Memory/PageFaultResponse.h
index 56297f6268..0a5c473230 100644
--- a/Kernel/Memory/PageFaultResponse.h
+++ b/Kernel/Memory/PageFaultResponse.h
@@ -10,6 +10,7 @@ namespace Kernel {
enum class PageFaultResponse {
ShouldCrash,
+ BusError,
OutOfMemory,
Continue,
};
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index a5ecc22fe6..0a794e2e16 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -362,7 +362,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
}
if (vmobject().is_inode()) {
dbgln_if(PAGE_FAULT_DEBUG, "NP(inode) fault in Region({})[{}]", this, page_index_in_region);
- return handle_inode_fault(page_index_in_region);
+ return handle_inode_fault(page_index_in_region, offset_in_page_from_address(fault.vaddr()));
}
SpinlockLocker vmobject_locker(vmobject().m_lock);
@@ -462,7 +462,7 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
return response;
}
-PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
+PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, size_t offset_in_page_in_region)
{
VERIFY(vmobject().is_inode());
VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
@@ -475,6 +475,13 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
// NOTE: The VMObject lock is required when manipulating the VMObject's physical page slot.
SpinlockLocker locker(inode_vmobject.m_lock);
+ if (inode_vmobject.inode().size() == 0)
+ return PageFaultResponse::BusError;
+ auto fault_vaddr = vaddr_from_page_index(page_index_in_vmobject).offset(offset_in_page_in_region);
+ auto inode_last_valid_address = vaddr().offset(inode_vmobject.inode().size());
+ if (inode_last_valid_address < fault_vaddr)
+ return PageFaultResponse::BusError;
+
if (!vmobject_physical_page_slot.is_null()) {
dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
if (!remap_vmobject_page(page_index_in_vmobject, *vmobject_physical_page_slot))
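The check added to handle_inode_fault() boils down to address arithmetic: an empty inode can never back a fault, and otherwise the faulting address (page index plus the new byte offset within the page) must not lie past the last address covered by the inode's size. Below is a minimal stand-alone sketch of that arithmetic, not SerenityOS code; all names and addresses are invented, and it assumes the region maps the VMObject starting at offset zero.

// Stand-alone illustration of the bounds check above; not SerenityOS code.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t page_size = 4096;

// Mirrors Region::offset_in_page_from_address(): byte offset of the faulting
// address within its page, relative to the region base.
static size_t offset_in_page_from_address(uintptr_t region_base, uintptr_t vaddr)
{
    return (vaddr - region_base) % page_size;
}

// Mirrors the new check in Region::handle_inode_fault(): the fault is a bus
// error if the inode is empty, or if the faulting address lies past the last
// address backed by the inode's contents (region assumed to map from
// VMObject offset 0).
static bool is_bus_error(uintptr_t region_base, uintptr_t fault_vaddr, size_t inode_size)
{
    if (inode_size == 0)
        return true;
    uintptr_t inode_last_valid_address = region_base + inode_size;
    return inode_last_valid_address < fault_vaddr;
}

int main()
{
    uintptr_t base = 0x20000000;
    uintptr_t fault = base + 0x2300; // second page of the region, offset 0x300

    printf("offset in page: %zu\n", offset_in_page_from_address(base, fault));   // 768
    printf("bus error (size 0):     %d\n", is_bus_error(base, fault, 0));        // 1: empty inode
    printf("bus error (size 4096):  %d\n", is_bus_error(base, fault, 4096));     // 1: past the inode's size
    printf("bus error (size 16384): %d\n", is_bus_error(base, fault, 16384));    // 0: within the inode
    return 0;
}
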
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index aabceef63d..1ef83b03fc 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -122,6 +122,11 @@ public:
return (vaddr - m_range.base()).get() / PAGE_SIZE;
}
+ [[nodiscard]] unsigned offset_in_page_from_address(VirtualAddress vaddr) const
+ {
+ return (vaddr - m_range.base()).get() % PAGE_SIZE;
+ }
+
[[nodiscard]] VirtualAddress vaddr_from_page_index(size_t page_index) const
{
return vaddr().offset(page_index * PAGE_SIZE);
@@ -219,7 +224,7 @@ private:
}
[[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index);
- [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index);
+ [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index, size_t offset_in_page_in_region);
[[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
[[nodiscard]] bool map_individual_page_impl(size_t page_index);