author     Andreas Kling <kling@serenityos.org>   2021-02-23 20:42:32 +0100
committer  Andreas Kling <kling@serenityos.org>   2021-02-23 20:56:54 +0100
commit     5d180d1f996ead27f9c5cb3db7f91e293de34d9d (patch)
tree       e881854dac5d749518562970d6194a0ef65736ec /Kernel/Syscalls
parent     b33a6a443e700cd80325d312f21c985b0687bb97 (diff)
download   serenity-5d180d1f996ead27f9c5cb3db7f91e293de34d9d.zip
Everywhere: Rename ASSERT => VERIFY
(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well, let's rename
them to VERIFY to prevent confusion, as everyone is used to assertions being
compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug checks,
but I'm doing this wholesale conversion first since we've accumulated
thousands of these already, and it's not immediately obvious which ones are
suitable for ASSERT.
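For readers unfamiliar with the convention, the sketch below illustrates the distinction the commit message draws. These are assumed, simplified definitions for illustration only, not the actual macros in AK/Assertions.h: VERIFY is evaluated in every build configuration, while a hypothetical debug-only ASSERT compiles out when NDEBUG is defined.

#include <cstdio>
#include <cstdlib>

// Always-on check: evaluated in debug and release builds alike.
#define VERIFY(expr)                                              \
    do {                                                          \
        if (!(expr)) {                                            \
            std::fprintf(stderr, "VERIFY failed: %s at %s:%d\n",  \
                #expr, __FILE__, __LINE__);                       \
            std::abort();                                         \
        }                                                         \
    } while (0)

#define VERIFY_NOT_REACHED() VERIFY(false)

// Hypothetical debug-only counterpart: compiled out when NDEBUG is set.
#ifdef NDEBUG
#    define ASSERT(expr) ((void)0)
#else
#    define ASSERT(expr) VERIFY(expr)
#endif

int main()
{
    int fd = 3;
    VERIFY(fd >= 0); // still checked in a release (-DNDEBUG) build
    ASSERT(fd >= 0); // would disappear in a release (-DNDEBUG) build
    return 0;
}

Compiling this once with and once without -DNDEBUG makes the difference visible: the VERIFY check aborts on failure in both configurations, while the ASSERT check only exists in the debug build.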
Diffstat (limited to 'Kernel/Syscalls')
-rw-r--r--  Kernel/Syscalls/execve.cpp            54
-rw-r--r--  Kernel/Syscalls/exit.cpp               2
-rw-r--r--  Kernel/Syscalls/futex.cpp             20
-rw-r--r--  Kernel/Syscalls/get_stack_bounds.cpp   2
-rw-r--r--  Kernel/Syscalls/kill.cpp               4
-rw-r--r--  Kernel/Syscalls/mmap.cpp               4
-rw-r--r--  Kernel/Syscalls/module.cpp             6
-rw-r--r--  Kernel/Syscalls/ptrace.cpp             2
-rw-r--r--  Kernel/Syscalls/select.cpp             6
-rw-r--r--  Kernel/Syscalls/socket.cpp             4
-rw-r--r--  Kernel/Syscalls/thread.cpp             2
-rw-r--r--  Kernel/Syscalls/unveil.cpp             2
-rw-r--r--  Kernel/Syscalls/waitid.cpp             2
-rw-r--r--  Kernel/Syscalls/write.cpp              2
14 files changed, 56 insertions, 56 deletions
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 82e8ba2748..41c76a8b85 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -195,7 +195,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
return IterationDecision::Continue;
});
- ASSERT(range.end > range.start);
+ VERIFY(range.end > range.start);
return range;
};
@@ -283,15 +283,15 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
FlatPtr load_base_address = 0;
String elf_name = object_description.absolute_path();
- ASSERT(!Processor::current().in_critical());
+ VERIFY(!Processor::current().in_critical());
MemoryManager::enter_space(*new_space);
KResult ph_load_result = KSuccess;
elf_image.for_each_program_header([&](const ELF::Image::ProgramHeader& program_header) {
if (program_header.type() == PT_TLS) {
- ASSERT(should_allocate_tls == ShouldAllocateTls::Yes);
- ASSERT(program_header.size_in_memory());
+ VERIFY(should_allocate_tls == ShouldAllocateTls::Yes);
+ VERIFY(program_header.size_in_memory());
if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
dbgln("Shenanigans! ELF PT_TLS header sneaks outside of executable.");
@@ -325,8 +325,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
if (program_header.is_writable()) {
// Writable section: create a copy in memory.
- ASSERT(program_header.size_in_memory());
- ASSERT(program_header.alignment() == PAGE_SIZE);
+ VERIFY(program_header.size_in_memory());
+ VERIFY(program_header.alignment() == PAGE_SIZE);
if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
dbgln("Shenanigans! Writable ELF PT_LOAD header sneaks outside of executable.");
@@ -368,8 +368,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
}
// Non-writable section: map the executable itself in memory.
- ASSERT(program_header.size_in_memory());
- ASSERT(program_header.alignment() == PAGE_SIZE);
+ VERIFY(program_header.size_in_memory());
+ VERIFY(program_header.alignment() == PAGE_SIZE);
int prot = 0;
if (program_header.is_readable())
prot |= PROT_READ;
@@ -454,17 +454,17 @@ KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_
return interpreter_load_result.error();
// TLS allocation will be done in userspace by the loader
- ASSERT(!interpreter_load_result.value().tls_region);
- ASSERT(!interpreter_load_result.value().tls_alignment);
- ASSERT(!interpreter_load_result.value().tls_size);
+ VERIFY(!interpreter_load_result.value().tls_region);
+ VERIFY(!interpreter_load_result.value().tls_alignment);
+ VERIFY(!interpreter_load_result.value().tls_size);
return interpreter_load_result;
}
KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const Elf32_Ehdr& main_program_header)
{
- ASSERT(is_user_process());
- ASSERT(!Processor::current().in_critical());
+ VERIFY(is_user_process());
+ VERIFY(!Processor::current().in_critical());
auto path = main_program_description->absolute_path();
dbgln_if(EXEC_DEBUG, "do_exec: {}", path);
@@ -522,7 +522,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
auto signal_trampoline_region = m_space->allocate_region_with_vmobject(signal_trampoline_range.value(), g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true);
if (signal_trampoline_region.is_error()) {
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
}
signal_trampoline_region.value()->set_syscall_region(true);
@@ -557,7 +557,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
int main_program_fd = -1;
if (interpreter_description) {
main_program_fd = alloc_fd();
- ASSERT(main_program_fd >= 0);
+ VERIFY(main_program_fd >= 0);
main_program_description->seek(0, SEEK_SET);
main_program_description->set_readable(true);
m_fds[main_program_fd].set(move(main_program_description), FD_CLOEXEC);
@@ -572,7 +572,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
return IterationDecision::Break;
});
}
- ASSERT(new_main_thread);
+ VERIFY(new_main_thread);
auto auxv = generate_auxiliary_vector(load_result.load_base, load_result.entry_eip, m_uid, m_euid, m_gid, m_egid, path, main_program_fd);
@@ -604,7 +604,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
auto tsr_result = new_main_thread->make_thread_specific_region({});
if (tsr_result.is_error()) {
// FIXME: We cannot fail this late. Refactor this so the allocation happens before we commit to the new executable.
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
}
new_main_thread->reset_fpu_state();
@@ -630,8 +630,8 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
}
u32 lock_count_to_restore;
[[maybe_unused]] auto rc = big_lock().force_unlock_if_locked(lock_count_to_restore);
- ASSERT_INTERRUPTS_DISABLED();
- ASSERT(Processor::current().in_critical());
+ VERIFY_INTERRUPTS_DISABLED();
+ VERIFY(Processor::current().in_critical());
return KSuccess;
}
@@ -727,7 +727,7 @@ KResultOr<RefPtr<FileDescription>> Process::find_elf_interpreter_for_executable(
auto interpreter_description = interp_result.value();
auto interp_metadata = interpreter_description->metadata();
- ASSERT(interpreter_description->inode());
+ VERIFY(interpreter_description->inode());
// Validate the program interpreter as a valid elf binary.
// If your program interpreter is a #! file or something, it's time to stop playing games :)
@@ -805,7 +805,7 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
if (metadata.size < 3)
return ENOEXEC;
- ASSERT(description->inode());
+ VERIFY(description->inode());
// Read the first page of the program into memory so we can validate the binfmt of it
char first_page[PAGE_SIZE];
@@ -856,20 +856,20 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
if (result.is_error())
return result;
- ASSERT_INTERRUPTS_DISABLED();
- ASSERT(Processor::current().in_critical());
+ VERIFY_INTERRUPTS_DISABLED();
+ VERIFY(Processor::current().in_critical());
auto current_thread = Thread::current();
if (current_thread == new_main_thread) {
// We need to enter the scheduler lock before changing the state
// and it will be released after the context switch into that
// thread. We should also still be in our critical section
- ASSERT(!g_scheduler_lock.own_lock());
- ASSERT(Processor::current().in_critical() == 1);
+ VERIFY(!g_scheduler_lock.own_lock());
+ VERIFY(Processor::current().in_critical() == 1);
g_scheduler_lock.lock();
current_thread->set_state(Thread::State::Running);
Processor::assume_context(*current_thread, prev_flags);
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
}
Processor::current().leave_critical(prev_flags);
@@ -926,7 +926,7 @@ int Process::sys$execve(Userspace<const Syscall::SC_execve_params*> user_params)
return -EFAULT;
auto result = exec(move(path), move(arguments), move(environment));
- ASSERT(result.is_error()); // We should never continue after a successful exec!
+ VERIFY(result.is_error()); // We should never continue after a successful exec!
return result.error();
}
diff --git a/Kernel/Syscalls/exit.cpp b/Kernel/Syscalls/exit.cpp
index 37d058fdab..cd127649d7 100644
--- a/Kernel/Syscalls/exit.cpp
+++ b/Kernel/Syscalls/exit.cpp
@@ -37,7 +37,7 @@ void Process::sys$exit(int status)
m_termination_signal = 0;
die();
Thread::current()->die_if_needed();
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
}
}
diff --git a/Kernel/Syscalls/futex.cpp b/Kernel/Syscalls/futex.cpp
index 61c64dd3b8..1240c7ee3e 100644
--- a/Kernel/Syscalls/futex.cpp
+++ b/Kernel/Syscalls/futex.cpp
@@ -62,7 +62,7 @@ FutexQueue::~FutexQueue()
void FutexQueue::vmobject_deleted(VMObject& vmobject)
{
- ASSERT(m_is_global); // If we got called we must be a global futex
+ VERIFY(m_is_global); // If we got called we must be a global futex
// Because we're taking ourselves out of the global queue, we need
// to make sure we have at last a reference until we're done
NonnullRefPtr<FutexQueue> own_ref(*this);
@@ -88,7 +88,7 @@ void FutexQueue::vmobject_deleted(VMObject& vmobject)
dbgln("Futex @ {} unblocked {} waiters due to vmobject free", this, wake_count);
}
- ASSERT(did_wake_all); // No one should be left behind...
+ VERIFY(did_wake_all); // No one should be left behind...
}
void Process::clear_futex_queues_on_exec()
@@ -97,7 +97,7 @@ void Process::clear_futex_queues_on_exec()
for (auto& it : m_futex_queues) {
bool did_wake_all;
it.value->wake_all(did_wake_all);
- ASSERT(did_wake_all); // No one should be left behind...
+ VERIFY(did_wake_all); // No one should be left behind...
}
m_futex_queues.clear();
}
@@ -172,16 +172,16 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
if (create_if_not_found) {
// TODO: is there a better way than setting and finding it again?
auto result = global_queues.set(&vmobject, {});
- ASSERT(result == AK::HashSetResult::InsertedNewEntry);
+ VERIFY(result == AK::HashSetResult::InsertedNewEntry);
it = global_queues.find(&vmobject);
- ASSERT(it != global_queues.end());
+ VERIFY(it != global_queues.end());
return &it->value;
}
return nullptr;
};
auto find_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found) -> RefPtr<FutexQueue> {
- ASSERT(is_private || vmobject);
+ VERIFY(is_private || vmobject);
auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, create_if_not_found);
if (!queues)
return {};
@@ -191,7 +191,7 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
if (create_if_not_found) {
auto futex_queue = adopt(*new FutexQueue(user_address_or_offset, vmobject));
auto result = queues->set(user_address_or_offset, futex_queue);
- ASSERT(result == AK::HashSetResult::InsertedNewEntry);
+ VERIFY(result == AK::HashSetResult::InsertedNewEntry);
return futex_queue;
}
return {};
@@ -234,7 +234,7 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, true);
- ASSERT(futex_queue);
+ VERIFY(futex_queue);
// We need to release the lock before blocking. But we have a reference
// to the FutexQueue so that we can keep it alive.
@@ -358,13 +358,13 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
return do_requeue(params.val3);
case FUTEX_WAIT_BITSET:
- ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAIT
+ VERIFY(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAIT
if (params.val3 == 0)
return -EINVAL;
return do_wait(params.val3);
case FUTEX_WAKE_BITSET:
- ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
+ VERIFY(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
if (params.val3 == 0)
return -EINVAL;
return do_wake(vmobject.ptr(), user_address_or_offset, params.val, params.val3);
diff --git a/Kernel/Syscalls/get_stack_bounds.cpp b/Kernel/Syscalls/get_stack_bounds.cpp
index b72939220d..f187fdf667 100644
--- a/Kernel/Syscalls/get_stack_bounds.cpp
+++ b/Kernel/Syscalls/get_stack_bounds.cpp
@@ -36,7 +36,7 @@ int Process::sys$get_stack_bounds(FlatPtr* user_stack_base, size_t* user_stack_s
auto* stack_region = space().find_region_containing(Range { VirtualAddress(stack_pointer), 1 });
// The syscall handler should have killed us if we had an invalid stack pointer.
- ASSERT(stack_region);
+ VERIFY(stack_region);
FlatPtr stack_base = stack_region->range().base().get();
size_t stack_size = stack_region->size();
diff --git a/Kernel/Syscalls/kill.cpp b/Kernel/Syscalls/kill.cpp
index 33d8c1643c..e8b8978799 100644
--- a/Kernel/Syscalls/kill.cpp
+++ b/Kernel/Syscalls/kill.cpp
@@ -47,7 +47,7 @@ KResult Process::do_killpg(ProcessGroupID pgrp, int signal)
{
InterruptDisabler disabler;
- ASSERT(pgrp >= 0);
+ VERIFY(pgrp >= 0);
// Send the signal to all processes in the given group.
if (pgrp == 0) {
@@ -136,7 +136,7 @@ int Process::sys$kill(pid_t pid_or_pgid, int signal)
if (pid_or_pgid == m_pid.value()) {
return do_killself(signal);
}
- ASSERT(pid_or_pgid >= 0);
+ VERIFY(pid_or_pgid >= 0);
ScopedSpinLock lock(g_processes_lock);
auto peer = Process::from_pid(pid_or_pgid);
if (!peer)
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index fa39188769..2ee5f5409e 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -465,7 +465,7 @@ int Process::sys$munmap(void* addr, size_t size)
if (!whole_region->is_mmap())
return -EPERM;
bool success = space().deallocate_region(*whole_region);
- ASSERT(success);
+ VERIFY(success);
return 0;
}
@@ -557,7 +557,7 @@ void* Process::sys$allocate_tls(size_t size)
main_thread = &thread;
return IterationDecision::Break;
});
- ASSERT(main_thread);
+ VERIFY(main_thread);
auto range = space().allocate_range({}, size);
if (!range.has_value())
diff --git a/Kernel/Syscalls/module.cpp b/Kernel/Syscalls/module.cpp
index c404d4970e..6582d3782d 100644
--- a/Kernel/Syscalls/module.cpp
+++ b/Kernel/Syscalls/module.cpp
@@ -80,7 +80,7 @@ int Process::sys$module_load(Userspace<const char*> user_path, size_t path_lengt
if (!section.size())
return IterationDecision::Continue;
auto* section_storage = section_storage_by_name.get(section.name()).value_or(nullptr);
- ASSERT(section_storage);
+ VERIFY(section_storage);
section.relocations().for_each_relocation([&](const ELF::Image::Relocation& relocation) {
auto& patch_ptr = *reinterpret_cast<ptrdiff_t*>(section_storage + relocation.offset());
switch (relocation.type()) {
@@ -100,7 +100,7 @@ int Process::sys$module_load(Userspace<const char*> user_path, size_t path_lengt
if (relocation.symbol().bind() == STB_LOCAL) {
auto* section_storage_containing_symbol = section_storage_by_name.get(relocation.symbol().section().name()).value_or(nullptr);
- ASSERT(section_storage_containing_symbol);
+ VERIFY(section_storage_containing_symbol);
u32 symbol_address = (ptrdiff_t)(section_storage_containing_symbol + relocation.symbol().value());
if (symbol_address == 0)
missing_symbols = true;
@@ -113,7 +113,7 @@ int Process::sys$module_load(Userspace<const char*> user_path, size_t path_lengt
dbgln(" Symbol address: {:p}", symbol_address);
patch_ptr += symbol_address;
} else {
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
}
break;
}
diff --git a/Kernel/Syscalls/ptrace.cpp b/Kernel/Syscalls/ptrace.cpp
index 1382998279..e882b02dcd 100644
--- a/Kernel/Syscalls/ptrace.cpp
+++ b/Kernel/Syscalls/ptrace.cpp
@@ -205,7 +205,7 @@ KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
if (region->is_shared()) {
// If the region is shared, we change its vmobject to a PrivateInodeVMObject
// to prevent the write operation from changing any shared inode data
- ASSERT(region->vmobject().is_shared_inode());
+ VERIFY(region->vmobject().is_shared_inode());
region->set_vmobject(PrivateInodeVMObject::create_with_inode(static_cast<SharedInodeVMObject&>(region->vmobject()).inode()));
region->set_shared(false);
}
diff --git a/Kernel/Syscalls/select.cpp b/Kernel/Syscalls/select.cpp
index 6d464d1f58..0009fdf37f 100644
--- a/Kernel/Syscalls/select.cpp
+++ b/Kernel/Syscalls/select.cpp
@@ -226,15 +226,15 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
pfd.revents |= POLLNVAL;
} else {
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Read) {
- ASSERT(pfd.events & POLLIN);
+ VERIFY(pfd.events & POLLIN);
pfd.revents |= POLLIN;
}
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::ReadPriority) {
- ASSERT(pfd.events & POLLPRI);
+ VERIFY(pfd.events & POLLPRI);
pfd.revents |= POLLPRI;
}
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Write) {
- ASSERT(pfd.events & POLLOUT);
+ VERIFY(pfd.events & POLLOUT);
pfd.revents |= POLLOUT;
}
}
diff --git a/Kernel/Syscalls/socket.cpp b/Kernel/Syscalls/socket.cpp
index 960aa7a8ec..6529edef43 100644
--- a/Kernel/Syscalls/socket.cpp
+++ b/Kernel/Syscalls/socket.cpp
@@ -122,7 +122,7 @@ int Process::sys$accept(int accepting_socket_fd, Userspace<sockaddr*> user_addre
}
}
auto accepted_socket = socket.accept();
- ASSERT(accepted_socket);
+ VERIFY(accepted_socket);
if (user_address) {
u8 address_buffer[sizeof(sockaddr_un)];
@@ -263,7 +263,7 @@ ssize_t Process::sys$recvmsg(int sockfd, Userspace<struct msghdr*> user_msg, int
int msg_flags = 0;
if (result.value() > iovs[0].iov_len) {
- ASSERT(socket.type() != SOCK_STREAM);
+ VERIFY(socket.type() != SOCK_STREAM);
msg_flags |= MSG_TRUNC;
}
diff --git a/Kernel/Syscalls/thread.cpp b/Kernel/Syscalls/thread.cpp
index fc24d2d731..6ad8e266c7 100644
--- a/Kernel/Syscalls/thread.cpp
+++ b/Kernel/Syscalls/thread.cpp
@@ -102,7 +102,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value)
}
Thread::current()->exit(reinterpret_cast<void*>(exit_value.ptr()));
- ASSERT_NOT_REACHED();
+ VERIFY_NOT_REACHED();
}
int Process::sys$detach_thread(pid_t tid)
diff --git a/Kernel/Syscalls/unveil.cpp b/Kernel/Syscalls/unveil.cpp
index 8657ecfee0..20a91edb69 100644
--- a/Kernel/Syscalls/unveil.cpp
+++ b/Kernel/Syscalls/unveil.cpp
@@ -125,7 +125,7 @@ int Process::sys$unveil(Userspace<const Syscall::SC_unveil_params*> user_params)
lexical_path.parts().end(),
{ new_unveiled_path, (UnveilAccess)new_permissions, true },
[](auto& parent, auto& it) -> Optional<UnveilMetadata> { return UnveilMetadata { String::formatted("{}/{}", parent.path(), *it), parent.permissions(), false, parent.permissions_inherited_from_root() }; });
- ASSERT(m_veil_state != VeilState::Locked);
+ VERIFY(m_veil_state != VeilState::Locked);
m_veil_state = VeilState::Dropped;
return 0;
}
diff --git a/Kernel/Syscalls/waitid.cpp b/Kernel/Syscalls/waitid.cpp
index d9dc09f509..c2ca611a89 100644
--- a/Kernel/Syscalls/waitid.cpp
+++ b/Kernel/Syscalls/waitid.cpp
@@ -34,7 +34,7 @@ KResultOr<siginfo_t> Process::do_waitid(idtype_t idtype, int id, int options)
KResultOr<siginfo_t> result = KResult(KSuccess);
if (Thread::current()->block<Thread::WaitBlocker>({}, options, idtype, id, result).was_interrupted())
return EINTR;
- ASSERT(!result.is_error() || (options & WNOHANG) || result.error() != KSuccess);
+ VERIFY(!result.is_error() || (options & WNOHANG) || result.error() != KSuccess);
return result;
}
diff --git a/Kernel/Syscalls/write.cpp b/Kernel/Syscalls/write.cpp
index b80f6c6912..e1006163ab 100644
--- a/Kernel/Syscalls/write.cpp
+++ b/Kernel/Syscalls/write.cpp
@@ -91,7 +91,7 @@ ssize_t Process::do_write(FileDescription& description, const UserOrKernelBuffer
if (!description.can_write()) {
if (!description.is_blocking()) {
// Short write: We can no longer write to this non-blocking description.
- ASSERT(total_nwritten > 0);
+ VERIFY(total_nwritten > 0);
return total_nwritten;
}
auto unblock_flags = Thread::FileBlocker::BlockFlags::None;