Diffstat (limited to 'Kernel/Arch')
-rw-r--r--  Kernel/Arch/i386/CPU.cpp     | 18
-rw-r--r--  Kernel/Arch/i386/SafeMem.cpp | 16
-rw-r--r--  Kernel/Arch/x86/CPU.h        |  8
-rw-r--r--  Kernel/Arch/x86/SafeMem.h    | 16
4 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index f413135cfd..6b54076dab 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -47,20 +47,20 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interr
// The compiler can't see the calls to these functions inside assembly.
// Declare them, to avoid dead code warnings.
-extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread);
-extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap);
-extern "C" u32 do_init_context(Thread* thread, u32 flags);
+extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
+extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
+extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
extern "C" void exit_kernel_thread(void);
-extern "C" void pre_init_finished(void);
-extern "C" void post_init_finished(void);
-extern "C" void handle_interrupt(TrapFrame*);
+extern "C" void pre_init_finished(void) __attribute__((used));
+extern "C" void post_init_finished(void) __attribute__((used));
+extern "C" void handle_interrupt(TrapFrame*) __attribute__((used));
// clang-format off
#if ARCH(I386)
#define EH_ENTRY(ec, title) \
extern "C" void title##_asm_entry(); \
- extern "C" void title##_handler(TrapFrame*); \
+ extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
asm( \
".globl " #title "_asm_entry\n" \
"" #title "_asm_entry: \n" \
@@ -84,8 +84,8 @@ extern "C" void handle_interrupt(TrapFrame*);
" jmp common_trap_exit \n");
#define EH_ENTRY_NO_CODE(ec, title) \
- extern "C" void title##_handler(TrapFrame*); \
- extern "C" void title##_asm_entry(); \
+ extern "C" void title##_asm_entry(); \
+ extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
asm( \
".globl " #title "_asm_entry\n" \
"" #title "_asm_entry: \n" \
diff --git a/Kernel/Arch/i386/SafeMem.cpp b/Kernel/Arch/i386/SafeMem.cpp
index 166f6493e2..4b96a4b4cf 100644
--- a/Kernel/Arch/i386/SafeMem.cpp
+++ b/Kernel/Arch/i386/SafeMem.cpp
@@ -40,7 +40,7 @@ extern "C" u8* safe_atomic_compare_exchange_relaxed_faulted;
namespace Kernel {
CODE_SECTION(".text.safemem")
-bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
+NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
{
fault_at = nullptr;
size_t dest = (size_t)dest_ptr;
@@ -86,7 +86,7 @@ bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
}
CODE_SECTION(".text.safemem")
-ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
+NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
{
ssize_t count = 0;
fault_at = nullptr;
@@ -115,7 +115,7 @@ ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
}
CODE_SECTION(".text.safemem")
-bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
+NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
fault_at = nullptr;
size_t dest = (size_t)dest_ptr;
@@ -163,7 +163,7 @@ bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
}
CODE_SECTION(".text.safemem.atomic")
-Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
+NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
u32 result;
bool error;
@@ -181,7 +181,7 @@ Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
}
CODE_SECTION(".text.safemem.atomic")
-Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
+NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
u32 result;
bool error;
@@ -199,7 +199,7 @@ Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
}
CODE_SECTION(".text.safemem.atomic")
-Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
+NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
{
u32 result;
bool error;
@@ -217,7 +217,7 @@ Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
}
CODE_SECTION(".text.safemem.atomic")
-bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
+NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
{
bool error;
asm volatile(
@@ -232,7 +232,7 @@ bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
}
CODE_SECTION(".text.safemem.atomic")
-Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
+NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
// NOTE: accessing expected is NOT protected as it should always point
// to a valid location in kernel memory!
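The safe_* helpers above are emitted into dedicated sections (.text.safemem and .text.safemem.atomic via CODE_SECTION), apparently so the page-fault handler can distinguish a recoverable fault inside one of them from a genuine kernel fault and resume at the matching *_faulted fixup. Marking them NEVER_INLINE (a wrapper around the compiler's noinline attribute) keeps their bodies from being inlined into callers outside those sections, which would defeat that check. A hedged usage sketch, with a hypothetical caller name and assuming the declarations come from Kernel/Arch/x86/SafeMem.h:

#include <Kernel/Arch/x86/SafeMem.h>

namespace Kernel {

// Illustrative caller only (not part of this commit): copy from a
// possibly-unmapped user pointer without letting the page fault take
// down the kernel.
bool copy_from_user_example(void* kernel_buffer, const void* user_ptr, size_t size)
{
    void* fault_at = nullptr;
    if (!safe_memcpy(kernel_buffer, user_ptr, size, fault_at)) {
        // The copy faulted part-way through; fault_at holds the address
        // that could not be accessed, so the caller can fail gracefully.
        return false;
    }
    return true;
}

}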
diff --git a/Kernel/Arch/x86/CPU.h b/Kernel/Arch/x86/CPU.h
index 6808a4e54c..64f6114e58 100644
--- a/Kernel/Arch/x86/CPU.h
+++ b/Kernel/Arch/x86/CPU.h
@@ -980,7 +980,7 @@ public:
void exit_trap(TrapFrame& trap);
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
- void switch_context(Thread*& from_thread, Thread*& to_thread);
+ NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
u32 init_context(Thread& thread, bool leave_crit);
static Vector<FlatPtr> capture_stack_trace(Thread& thread, size_t max_frames = 0);
@@ -1057,9 +1057,9 @@ struct TrapFrame {
static_assert(TRAP_FRAME_SIZE == sizeof(TrapFrame));
-extern "C" void enter_trap_no_irq(TrapFrame*);
-extern "C" void enter_trap(TrapFrame*);
-extern "C" void exit_trap(TrapFrame*);
+extern "C" void enter_trap_no_irq(TrapFrame*) __attribute__((used));
+extern "C" void enter_trap(TrapFrame*) __attribute__((used));
+extern "C" void exit_trap(TrapFrame*) __attribute__((used));
class MSR {
uint32_t m_msr;
diff --git a/Kernel/Arch/x86/SafeMem.h b/Kernel/Arch/x86/SafeMem.h
index 3b984a663e..66d17fb74e 100644
--- a/Kernel/Arch/x86/SafeMem.h
+++ b/Kernel/Arch/x86/SafeMem.h
@@ -14,14 +14,14 @@ namespace Kernel {
struct RegisterState;
-[[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at);
-[[nodiscard]] ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at);
-[[nodiscard]] bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at);
-[[nodiscard]] Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val);
-[[nodiscard]] Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val);
-[[nodiscard]] Optional<u32> safe_atomic_load_relaxed(volatile u32* var);
-[[nodiscard]] bool safe_atomic_store_relaxed(volatile u32* var, u32 val);
-[[nodiscard]] Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val);
+[[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at) __attribute__((used));
+[[nodiscard]] ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at) __attribute__((used));
+[[nodiscard]] bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at) __attribute__((used));
+[[nodiscard]] Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val) __attribute__((used));
+[[nodiscard]] Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val) __attribute__((used));
+[[nodiscard]] Optional<u32> safe_atomic_load_relaxed(volatile u32* var) __attribute__((used));
+[[nodiscard]] bool safe_atomic_store_relaxed(volatile u32* var, u32 val) __attribute__((used));
+[[nodiscard]] Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val) __attribute__((used));
[[nodiscard]] ALWAYS_INLINE Optional<u32> safe_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{