From 9f76b1612439d816e55a722df7a3a62f4718895d Mon Sep 17 00:00:00 2001
From: Timon Kruiper
Date: Mon, 2 May 2022 23:01:03 +0200
Subject: Kernel: Implement safe_memcpy for the aarch64 build

The implementation just calls the regular memcpy, and is not safe yet.
This can be done later.
---
 Kernel/Arch/aarch64/Dummy.cpp   | 57 --------------------------------------
 Kernel/Arch/aarch64/SafeMem.cpp | 61 +++++++++++++++++++++++++++++++++++++++++
 Kernel/CMakeLists.txt           |  1 +
 3 files changed, 62 insertions(+), 57 deletions(-)
 create mode 100644 Kernel/Arch/aarch64/SafeMem.cpp

diff --git a/Kernel/Arch/aarch64/Dummy.cpp b/Kernel/Arch/aarch64/Dummy.cpp
index c8f9092861..9c947b330a 100644
--- a/Kernel/Arch/aarch64/Dummy.cpp
+++ b/Kernel/Arch/aarch64/Dummy.cpp
@@ -141,63 +141,6 @@ void KString::operator delete(void*)
     VERIFY_NOT_REACHED();
 }
 
-// SafeMem.h
-bool safe_memset(void*, int, size_t, void*&);
-bool safe_memset(void*, int, size_t, void*&)
-{
-    VERIFY_NOT_REACHED();
-    return false;
-}
-
-ssize_t safe_strnlen(char const*, unsigned long, void*&);
-ssize_t safe_strnlen(char const*, unsigned long, void*&)
-{
-    VERIFY_NOT_REACHED();
-    return 0;
-}
-
-bool safe_memcpy(void*, void const*, unsigned long, void*&);
-bool safe_memcpy(void*, void const*, unsigned long, void*&)
-{
-    VERIFY_NOT_REACHED();
-    return false;
-}
-
-Optional<u32> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32);
-Optional<u32> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32)
-{
-    VERIFY_NOT_REACHED();
-    return {};
-}
-
-Optional<u32> safe_atomic_load_relaxed(u32 volatile*);
-Optional<u32> safe_atomic_load_relaxed(u32 volatile*)
-{
-    VERIFY_NOT_REACHED();
-    return {};
-}
-
-Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32);
-Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32)
-{
-    VERIFY_NOT_REACHED();
-    return {};
-}
-
-Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32);
-Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32)
-{
-    VERIFY_NOT_REACHED();
-    return {};
-}
-
-bool safe_atomic_store_relaxed(u32 volatile*, u32);
-bool safe_atomic_store_relaxed(u32 volatile*, u32)
-{
-    VERIFY_NOT_REACHED();
-    return {};
-}
-
 }
 
 extern "C" {
diff --git a/Kernel/Arch/aarch64/SafeMem.cpp b/Kernel/Arch/aarch64/SafeMem.cpp
new file mode 100644
index 0000000000..93167f1108
--- /dev/null
+++ b/Kernel/Arch/aarch64/SafeMem.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2022, Timon Kruiper
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/SafeMem.h>
+#include <Kernel/StdLib.h>
+
+namespace Kernel {
+
+bool safe_memset(void*, int, size_t, void*&)
+{
+    VERIFY_NOT_REACHED();
+    return false;
+}
+
+ssize_t safe_strnlen(char const*, unsigned long, void*&)
+{
+    VERIFY_NOT_REACHED();
+    return 0;
+}
+
+bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*&)
+{
+    // FIXME: Actually implement a safe memcpy.
+    memcpy(dest_ptr, src_ptr, n);
+    return true;
+}
+
+Optional<u32> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32)
+{
+    VERIFY_NOT_REACHED();
+    return {};
+}
+
+Optional<u32> safe_atomic_load_relaxed(u32 volatile*)
+{
+    VERIFY_NOT_REACHED();
+    return {};
+}
+
+Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32)
+{
+    VERIFY_NOT_REACHED();
+    return {};
+}
+
+Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32)
+{
+    VERIFY_NOT_REACHED();
+    return {};
+}
+
+bool safe_atomic_store_relaxed(u32 volatile*, u32)
+{
+    VERIFY_NOT_REACHED();
+    return {};
+}
+
+}
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index f1a1c6231d..acc4cbf13c 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -418,6 +418,7 @@ else()
         Arch/aarch64/kprintf.cpp
         Arch/aarch64/MainIdRegister.cpp
         Arch/aarch64/PageDirectory.cpp
+        Arch/aarch64/SafeMem.cpp
         Arch/aarch64/ScopedCritical.cpp
         Arch/aarch64/SmapDisabler.cpp
         Arch/aarch64/init.cpp
--
cgit v1.2.3
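
Editor's note: since the interim safe_memcpy forwards straight to memcpy, a fault while
touching user memory would still take down the kernel on aarch64. The sketch below is a
minimal illustration of the contract a finished implementation is expected to give its
callers: return false and report the faulting address through the last parameter instead
of panicking, which is what lets syscalls fail with EFAULT on a bad userspace pointer.
The function name copy_to_user_example and the exact include path are assumptions made
for this illustration only; they are not part of the patch or the existing kernel API.

// Hypothetical caller sketch, not part of this patch: it only illustrates the
// bool-plus-fault-address contract that safe_memcpy is meant to provide.
#include <Kernel/Arch/SafeMem.h>

namespace Kernel {

bool copy_to_user_example(void* dest, void const* src, unsigned long n)
{
    void* fault_address = nullptr;
    // A finished safe_memcpy is expected to return false when the copy faults
    // (for example on an unmapped user page) and to report the faulting
    // address through its last parameter. The interim aarch64 version can
    // never take this branch, because it forwards straight to memcpy.
    if (!safe_memcpy(dest, src, n, fault_address))
        return false;
    return true;
}

}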