author | Gunnar Beutner <gbeutner@serenityos.org> | 2021-04-29 14:54:15 +0200
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2021-04-29 20:26:36 +0200
commit | 55ae52fdf81cdcc7d1bf7b883953e2639701d21b |
tree | a0ec01f87b49f3261b5fe00344d5cc72615231cc /Kernel/Arch/i386/SafeMem.cpp |
parent | b8612590984ee39d519ebf45f80e051b6f0d75bf |
Kernel: Enable building the kernel with -flto
GCC with -flto is more aggressive when it comes to inlining and
discarding functions, which is why we must mark some of the functions
as NEVER_INLINE (because they contain asm labels which would be
duplicated in the object files if the compiler decides to inline
the function elsewhere) and others as __attribute__((used)) so
that GCC doesn't discard them.
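
To illustrate the failure mode the commit message describes, here is a minimal, hypothetical sketch (not taken from this commit; the function names, the label, and the spelled-out attributes are assumptions, and NEVER_INLINE presumably wraps the plain noinline attribute shown here). A function that defines a named asm label cannot safely be inlined: if LTO copied its body into two call sites, the label would be assembled twice and the build would fail with a duplicate-symbol error. A function reached only from assembly or a fault handler has no visible C++ callers, so LTO would discard it unless it is marked as used.

```cpp
// Hypothetical sketch, not part of the commit. i386 AT&T syntax and
// GCC-style attributes assumed.
typedef unsigned int u32;

__attribute__((noinline)) u32 probe_read(const u32* src)
{
    u32 result;
    // "probe_read_label" is emitted by this asm block. If the compiler
    // inlined this function at two call sites, the label would be defined
    // twice and the assembler would reject the duplicate symbol.
    asm volatile(
        "probe_read_label:\n"
        "movl (%1), %0\n"
        : "=r"(result)
        : "r"(src)
        : "memory");
    return result;
}

// Only referenced from assembly (or a fault handler), so without
// __attribute__((used)) LTO's dead-code elimination would drop it.
extern "C" __attribute__((used)) void reached_only_from_asm()
{
}
```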
Diffstat (limited to 'Kernel/Arch/i386/SafeMem.cpp')
-rw-r--r-- | Kernel/Arch/i386/SafeMem.cpp | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/Kernel/Arch/i386/SafeMem.cpp b/Kernel/Arch/i386/SafeMem.cpp
index 166f6493e2..4b96a4b4cf 100644
--- a/Kernel/Arch/i386/SafeMem.cpp
+++ b/Kernel/Arch/i386/SafeMem.cpp
@@ -40,7 +40,7 @@ extern "C" u8* safe_atomic_compare_exchange_relaxed_faulted;
 namespace Kernel {
 
 CODE_SECTION(".text.safemem")
-bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
+NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
 {
     fault_at = nullptr;
     size_t dest = (size_t)dest_ptr;
@@ -86,7 +86,7 @@ bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
 }
 
 CODE_SECTION(".text.safemem")
-ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
+NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
 {
     ssize_t count = 0;
     fault_at = nullptr;
@@ -115,7 +115,7 @@ ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
 }
 
 CODE_SECTION(".text.safemem")
-bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
+NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
 {
     fault_at = nullptr;
     size_t dest = (size_t)dest_ptr;
@@ -163,7 +163,7 @@ bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
 }
 
 CODE_SECTION(".text.safemem.atomic")
-Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
+NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
 {
     u32 result;
     bool error;
@@ -181,7 +181,7 @@ Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
 }
 
 CODE_SECTION(".text.safemem.atomic")
-Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
+NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
 {
     u32 result;
     bool error;
@@ -199,7 +199,7 @@ Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
 }
 
 CODE_SECTION(".text.safemem.atomic")
-Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
+NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
 {
     u32 result;
     bool error;
@@ -217,7 +217,7 @@ Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
 }
 
 CODE_SECTION(".text.safemem.atomic")
-bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
+NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
 {
     bool error;
     asm volatile(
@@ -232,7 +232,7 @@ bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
 }
 
 CODE_SECTION(".text.safemem.atomic")
-Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
+NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
 {
     // NOTE: accessing expected is NOT protected as it should always point
     //       to a valid location in kernel memory!
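
For context, a hedged sketch of how a caller might use the safe_memcpy signature shown in the diff: only the Kernel::safe_memcpy declaration is taken from the file above; the wrapper name and its error handling are invented for illustration.

```cpp
// Hypothetical caller sketch. Only the safe_memcpy signature comes from the
// diff above; copy_to_user_sketch and its behavior are assumptions.
#include <stddef.h>

namespace Kernel {
bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at);
}

// Returns true on success. On a fault, fault_at identifies the address that
// could not be accessed; a real caller would typically log it and fail the
// syscall with EFAULT instead of just returning false.
bool copy_to_user_sketch(void* user_dest, const void* kernel_src, size_t n)
{
    void* fault_at = nullptr;
    if (!Kernel::safe_memcpy(user_dest, kernel_src, n, fault_at))
        return false;
    return true;
}
```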