author:    Andreas Kling <kling@serenityos.org>  2021-02-23 20:42:32 +0100
committer: Andreas Kling <kling@serenityos.org>  2021-02-23 20:56:54 +0100
commit:    5d180d1f996ead27f9c5cb3db7f91e293de34d9d
tree:      e881854dac5d749518562970d6194a0ef65736ec /Kernel/StdLib.cpp
parent:    b33a6a443e700cd80325d312f21c985b0687bb97
Everywhere: Rename ASSERT => VERIFY
(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)
Since all of these checks are performed in release builds as well,
let's rename them to VERIFY to prevent confusion, as everyone is
used to assertions being compiled out in release builds.
We can introduce a new ASSERT macro that is specifically for debug
checks, but I'm doing this wholesale conversion first since we've
accumulated thousands of these already, and it's not immediately
obvious which ones are suitable for ASSERT.
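
As a rough sketch of the distinction the message is drawing (illustrative only, not the actual AK/Assertions.h definitions; kernel_panic_handler is a hypothetical placeholder for the real panic path):

    // Hypothetical sketch: VERIFY fires in every build type, while a
    // debug-only ASSERT would compile away when NDEBUG is defined.
    [[noreturn]] void kernel_panic_handler(const char* expression);

    #define VERIFY(expr)                        \
        do {                                    \
            if (!(expr))                        \
                kernel_panic_handler(#expr);    \
        } while (0)

    #define VERIFY_NOT_REACHED() VERIFY(false)

    #ifdef NDEBUG
    #    define ASSERT(expr) ((void)0)
    #else
    #    define ASSERT(expr) VERIFY(expr)
    #endif

Under a scheme like this, a release build still pays for (and benefits from) every VERIFY, which is exactly the behavior the rename is meant to make obvious at the call site.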
Diffstat (limited to 'Kernel/StdLib.cpp')
-rw-r--r--  Kernel/StdLib.cpp | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/Kernel/StdLib.cpp b/Kernel/StdLib.cpp
index 34e8375bb9..2b6df4871e 100644
--- a/Kernel/StdLib.cpp
+++ b/Kernel/StdLib.cpp
@@ -110,7 +110,7 @@ Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expe
 {
     if (FlatPtr(var) & 3)
         return {}; // not aligned!
-    ASSERT(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
+    VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
     bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
     if (!is_user)
         return {};
@@ -169,11 +169,11 @@ bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
     bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
     if (!is_user)
         return false;
-    ASSERT(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
+    VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
     Kernel::SmapDisabler disabler;
     void* fault_at;
     if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
-        ASSERT(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
+        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
         klog() << "copy_to_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
         return false;
     }
@@ -185,11 +185,11 @@ bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
     bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
     if (!is_user)
         return false;
-    ASSERT(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
+    VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
     Kernel::SmapDisabler disabler;
     void* fault_at;
     if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
-        ASSERT(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
+        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
         klog() << "copy_from_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
         return false;
     }
@@ -361,22 +361,22 @@ extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
 [[noreturn]] void __stack_chk_fail()
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }
 
 [[noreturn]] void __stack_chk_fail_local()
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }
 
 extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
     return 0;
 }
 
 [[noreturn]] void __cxa_pure_virtual()
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }
 }
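
For context on what these VERIFY calls enforce: copy_to_user()/copy_from_user() check that the kernel-side pointer never points into user memory, and, when safe_memcpy() faults, that the faulting address actually lies within the user-supplied range. A hypothetical call site might look like this (sketch only; the syscall name, TimeSpec layout, and EFAULT-style return value are placeholders, not the kernel's API):

    struct TimeSpec {
        long seconds;
        long nanoseconds;
    };

    int sys_example_gettime(TimeSpec* user_ts)
    {
        TimeSpec ts { 0, 0 }; // kernel-side value, filled in by the caller
        // copy_to_user() returns false if user_ts is not a valid,
        // writable user-space range of sizeof(ts) bytes.
        if (!copy_to_user(user_ts, &ts, sizeof(ts)))
            return -EFAULT;
        return 0;
    }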