author    Gunnar Beutner <gbeutner@serenityos.org>  2021-04-29 14:54:15 +0200
committer Andreas Kling <kling@serenityos.org>      2021-04-29 20:26:36 +0200
commit    55ae52fdf81cdcc7d1bf7b883953e2639701d21b (patch)
tree      a0ec01f87b49f3261b5fe00344d5cc72615231cc /Kernel/Arch/x86
parent    b8612590984ee39d519ebf45f80e051b6f0d75bf (diff)
download  serenity-55ae52fdf81cdcc7d1bf7b883953e2639701d21b.zip
Kernel: Enable building the kernel with -flto
With -flto, GCC is more aggressive about inlining and about discarding functions it believes are unreferenced. We therefore have to mark some functions NEVER_INLINE (they contain asm labels, which would be duplicated in the object files if the compiler inlined the function elsewhere) and mark others __attribute__((used)) so that GCC doesn't discard them.
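
As a rough illustration, here is a minimal, self-contained sketch of the two failure modes and how the annotations avoid them. The function names and the label are hypothetical, not taken from the kernel sources, and NEVER_INLINE is approximated inline so the sketch stands alone.

// NEVER_INLINE is the kernel's no-inline macro; approximated here so the
// sketch is self-contained.
#define NEVER_INLINE __attribute__((noinline))

// Case 1: the function body defines an asm label. If LTO inlined the body
// into several call sites, the label would be emitted once per call site
// and the link would fail with a duplicate-symbol error, so inlining must
// be forbidden.
NEVER_INLINE void function_with_asm_label()
{
    asm volatile(
        ".globl example_label\n"
        "example_label:\n"
        "    nop\n");
}

// Case 2: the function is only ever called from hand-written assembly, so
// on the C++ side it looks unreferenced. Without __attribute__((used)),
// LTO may drop it and the reference from assembly fails to resolve at
// link time.
extern "C" void called_only_from_asm() __attribute__((used));
extern "C" void called_only_from_asm()
{
    // ...
}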
Diffstat (limited to 'Kernel/Arch/x86')
-rw-r--r--  Kernel/Arch/x86/CPU.h      8
-rw-r--r--  Kernel/Arch/x86/SafeMem.h  16
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/Kernel/Arch/x86/CPU.h b/Kernel/Arch/x86/CPU.h
index 6808a4e54c..64f6114e58 100644
--- a/Kernel/Arch/x86/CPU.h
+++ b/Kernel/Arch/x86/CPU.h
@@ -980,7 +980,7 @@ public:
void exit_trap(TrapFrame& trap);
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
- void switch_context(Thread*& from_thread, Thread*& to_thread);
+ NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
u32 init_context(Thread& thread, bool leave_crit);
static Vector<FlatPtr> capture_stack_trace(Thread& thread, size_t max_frames = 0);
@@ -1057,9 +1057,9 @@ struct TrapFrame {
static_assert(TRAP_FRAME_SIZE == sizeof(TrapFrame));
-extern "C" void enter_trap_no_irq(TrapFrame*);
-extern "C" void enter_trap(TrapFrame*);
-extern "C" void exit_trap(TrapFrame*);
+extern "C" void enter_trap_no_irq(TrapFrame*) __attribute__((used));
+extern "C" void enter_trap(TrapFrame*) __attribute__((used));
+extern "C" void exit_trap(TrapFrame*) __attribute__((used));
class MSR {
uint32_t m_msr;
diff --git a/Kernel/Arch/x86/SafeMem.h b/Kernel/Arch/x86/SafeMem.h
index 3b984a663e..66d17fb74e 100644
--- a/Kernel/Arch/x86/SafeMem.h
+++ b/Kernel/Arch/x86/SafeMem.h
@@ -14,14 +14,14 @@ namespace Kernel {
struct RegisterState;
-[[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at);
-[[nodiscard]] ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at);
-[[nodiscard]] bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at);
-[[nodiscard]] Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val);
-[[nodiscard]] Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val);
-[[nodiscard]] Optional<u32> safe_atomic_load_relaxed(volatile u32* var);
-[[nodiscard]] bool safe_atomic_store_relaxed(volatile u32* var, u32 val);
-[[nodiscard]] Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val);
+[[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at) __attribute__((used));
+[[nodiscard]] ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at) __attribute__((used));
+[[nodiscard]] bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at) __attribute__((used));
+[[nodiscard]] Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val) __attribute__((used));
+[[nodiscard]] Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val) __attribute__((used));
+[[nodiscard]] Optional<u32> safe_atomic_load_relaxed(volatile u32* var) __attribute__((used));
+[[nodiscard]] bool safe_atomic_store_relaxed(volatile u32* var, u32 val) __attribute__((used));
+[[nodiscard]] Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val) __attribute__((used));
[[nodiscard]] ALWAYS_INLINE Optional<u32> safe_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{