author | Timon Kruiper <timonkruiper@gmail.com> | 2023-04-06 17:11:20 +0200
committer | Linus Groh <mail@linusgroh.de> | 2023-04-13 20:22:08 +0200
commit | baa5cb9e30a76ced3ff0e0de0ce2e79c8ac94898 (patch)
tree | 5451b25497a0cc369b163f20159b623003ccbf5a
parent | 10030038e910237816b6a181deb69a88d15d6550 (diff)
Kernel/aarch64: Add volatile modifier to various asm statements
This prevents the optimizer from reordering them, which hopefully
prevents future bugs.
-rw-r--r-- | Kernel/Arch/aarch64/ASM_wrapper.h | 22
-rw-r--r-- | Kernel/Arch/aarch64/Registers.h | 130 |
2 files changed, 76 insertions, 76 deletions
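
As background for the change below: for an asm statement with output operands, GCC and Clang may assume it has no side effects beyond producing those outputs, so duplicate statements can be merged and statements whose results go unused can be deleted. The volatile qualifier disables those optimizations. A minimal sketch, not taken from the commit (the helper names and the choice of CNTVCT_EL0 are illustrative):

```cpp
#include <cstdint>

// Hypothetical helpers (not part of this commit) mirroring the mrs-read
// pattern in Registers.h; both read the virtual counter CNTVCT_EL0.

// Without volatile, the compiler may treat this as a pure computation of
// its output: repeated calls can be folded into a single mrs, and a call
// whose result is unused can be dropped entirely.
inline uint64_t read_counter()
{
    uint64_t value;
    asm("mrs %[value], CNTVCT_EL0"
        : [value] "=r"(value));
    return value;
}

// With volatile, every call emits its own mrs and none of them are removed,
// even when the result is ignored.
inline uint64_t read_counter_volatile()
{
    uint64_t value;
    asm volatile("mrs %[value], CNTVCT_EL0"
                 : [value] "=r"(value));
    return value;
}

uint64_t counter_delta()
{
    // The non-volatile variant may legally be compiled as one shared read,
    // constant-folding this delta to 0; the volatile variant forces two
    // distinct reads in program order.
    uint64_t before = read_counter_volatile();
    uint64_t after = read_counter_volatile();
    return after - before;
}
```

Asm statements with no output operands, such as the msr writes in this patch, are already implicitly volatile in GCC and Clang, so spelling out the qualifier there mainly documents the intent.
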
diff --git a/Kernel/Arch/aarch64/ASM_wrapper.h b/Kernel/Arch/aarch64/ASM_wrapper.h
index 9bb7a7ac99..f28d14fcc8 100644
--- a/Kernel/Arch/aarch64/ASM_wrapper.h
+++ b/Kernel/Arch/aarch64/ASM_wrapper.h
@@ -16,36 +16,36 @@ namespace Kernel::Aarch64::Asm {
 
 inline void set_ttbr1_el1(FlatPtr ttbr1_el1)
 {
-    asm("msr ttbr1_el1, %[value]" ::[value] "r"(ttbr1_el1));
+    asm volatile("msr ttbr1_el1, %[value]" ::[value] "r"(ttbr1_el1));
 }
 
 inline void set_ttbr0_el1(FlatPtr ttbr0_el1)
 {
-    asm("msr ttbr0_el1, %[value]" ::[value] "r"(ttbr0_el1));
+    asm volatile("msr ttbr0_el1, %[value]" ::[value] "r"(ttbr0_el1));
 }
 
 inline FlatPtr get_ttbr0_el1()
 {
     FlatPtr ttbr0_el1;
-    asm("mrs %[value], ttbr0_el1\n"
-        : [value] "=r"(ttbr0_el1));
+    asm volatile("mrs %[value], ttbr0_el1\n"
+                 : [value] "=r"(ttbr0_el1));
     return ttbr0_el1;
 }
 
 inline void set_sp_el1(FlatPtr sp_el1)
 {
-    asm("msr sp_el1, %[value]" ::[value] "r"(sp_el1));
+    asm volatile("msr sp_el1, %[value]" ::[value] "r"(sp_el1));
 }
 
 inline void set_tpidr_el0(FlatPtr tpidr_el0)
 {
-    asm("msr tpidr_el0, %[value]" ::[value] "r"(tpidr_el0));
+    asm volatile("msr tpidr_el0, %[value]" ::[value] "r"(tpidr_el0));
 }
 
 inline void flush()
 {
-    asm("dsb ish");
-    asm("isb");
+    asm volatile("dsb ish");
+    asm volatile("isb");
 }
 
 [[noreturn]] inline void halt()
@@ -66,8 +66,8 @@ inline ExceptionLevel get_current_exception_level()
 {
     u64 current_exception_level;
 
-    asm("mrs %[value], CurrentEL"
-        : [value] "=r"(current_exception_level));
+    asm volatile("mrs %[value], CurrentEL"
+                 : [value] "=r"(current_exception_level));
 
     current_exception_level = (current_exception_level >> 2) & 0x3;
     return static_cast<ExceptionLevel>(current_exception_level);
@@ -83,7 +83,7 @@ inline void wait_cycles(int n)
 
 inline void load_el1_vector_table(void* vector_table)
 {
-    asm("msr VBAR_EL1, %[value]" ::[value] "r"(vector_table));
+    asm volatile("msr VBAR_EL1, %[value]" ::[value] "r"(vector_table));
 }
 
 inline void enter_el2_from_el3()
diff --git a/Kernel/Arch/aarch64/Registers.h b/Kernel/Arch/aarch64/Registers.h
index 264c4ab177..5ca4e626ce 100644
--- a/Kernel/Arch/aarch64/Registers.h
+++ b/Kernel/Arch/aarch64/Registers.h
@@ -38,8 +38,8 @@ struct alignas(u64) ID_AA64ISAR0_EL1 {
     {
         ID_AA64ISAR0_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64ISAR0_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64ISAR0_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -70,8 +70,8 @@ struct alignas(u64) ID_AA64ISAR1_EL1 {
     {
         ID_AA64ISAR1_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64ISAR1_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64ISAR1_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -101,8 +101,8 @@ struct alignas(u64) ID_AA64ISAR2_EL1 {
     {
         ID_AA64ISAR2_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64ISAR2_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64ISAR2_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -133,8 +133,8 @@ struct alignas(u64) ID_AA64PFR0_EL1 {
     {
         ID_AA64PFR0_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64PFR0_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64PFR0_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -165,8 +165,8 @@ struct alignas(u64) ID_AA64PFR1_EL1 {
     {
         ID_AA64PFR1_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64PFR1_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64PFR1_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -186,8 +186,8 @@ struct alignas(u64) ID_AA64PFR2_EL1 {
     {
         ID_AA64PFR2_EL1 feature_register;
 
-        asm("mrs %[value], s3_0_c0_c4_2" // encoded ID_AA64PFR2_EL1 register
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], s3_0_c0_c4_2" // encoded ID_AA64PFR2_EL1 register
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -211,8 +211,8 @@ struct alignas(u64) MPIDR_EL1 {
     {
         MPIDR_EL1 affinity_register;
 
-        asm("mrs %[value], MPIDR_EL1"
-            : [value] "=r"(affinity_register));
+        asm volatile("mrs %[value], MPIDR_EL1"
+                     : [value] "=r"(affinity_register));
 
         return affinity_register;
     }
@@ -242,8 +242,8 @@ struct alignas(u64) ID_AA64MMFR0_EL1 {
     {
         ID_AA64MMFR0_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64MMFR0_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64MMFR0_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -274,8 +274,8 @@ struct alignas(u64) ID_AA64MMFR1_EL1 {
     {
         ID_AA64MMFR1_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64MMFR1_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64MMFR1_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -306,8 +306,8 @@ struct alignas(u64) ID_AA64MMFR2_EL1 {
     {
         ID_AA64MMFR2_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64MMFR2_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64MMFR2_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -338,8 +338,8 @@ struct alignas(u64) ID_AA64MMFR3_EL1 {
     {
         ID_AA64MMFR3_EL1 feature_register;
 
-        asm("mrs %[value], s3_0_c0_c7_3" // encoded ID_AA64MMFR3_EL1 register
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], s3_0_c0_c7_3" // encoded ID_AA64MMFR3_EL1 register
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -358,8 +358,8 @@ struct alignas(u64) ID_AA64MMFR4_EL1 {
     {
         ID_AA64MMFR4_EL1 feature_register;
 
-        asm("mrs %[value], s3_0_c0_c7_4" // encoded ID_AA64MMFR4_EL1 register
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], s3_0_c0_c7_4" // encoded ID_AA64MMFR4_EL1 register
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -390,8 +390,8 @@ struct alignas(u64) ID_AA64SMFR0_EL1 {
     {
         ID_AA64SMFR0_EL1 feature_register;
 
-        asm("mrs %[value], s3_0_c0_c4_5" // encoded ID_AA64SMFR0_EL1 register
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], s3_0_c0_c4_5" // encoded ID_AA64SMFR0_EL1 register
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -421,8 +421,8 @@ struct alignas(u64) ID_AA64ZFR0_EL1 {
     {
         ID_AA64ZFR0_EL1 feature_register;
 
-        asm("mrs %[value], s3_0_c0_c4_4" // encoded ID_AA64ZFR0_EL1 register
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], s3_0_c0_c4_4" // encoded ID_AA64ZFR0_EL1 register
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -453,8 +453,8 @@ struct alignas(u64) ID_AA64DFR0_EL1 {
     {
         ID_AA64DFR0_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64DFR0_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64DFR0_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -480,8 +480,8 @@ struct alignas(u64) ID_AA64DFR1_EL1 {
     {
         ID_AA64DFR1_EL1 feature_register;
 
-        asm("mrs %[value], ID_AA64DFR1_EL1"
-            : [value] "=r"(feature_register));
+        asm volatile("mrs %[value], ID_AA64DFR1_EL1"
+                     : [value] "=r"(feature_register));
 
         return feature_register;
     }
@@ -498,8 +498,8 @@ struct alignas(u64) CNTFRQ_EL0 {
     {
         CNTFRQ_EL0 frequency;
 
-        asm("mrs %[value], CNTFRQ_EL0"
-            : [value] "=r"(frequency));
+        asm volatile("mrs %[value], CNTFRQ_EL0"
+                     : [value] "=r"(frequency));
 
         return frequency;
     }
@@ -597,15 +597,15 @@ struct alignas(u64) TCR_EL1 {
 
     static inline void write(TCR_EL1 tcr_el1)
     {
-        asm("msr tcr_el1, %[value]" ::[value] "r"(tcr_el1));
+        asm volatile("msr tcr_el1, %[value]" ::[value] "r"(tcr_el1));
     }
 
     static inline TCR_EL1 read()
     {
         TCR_EL1 tcr_el1;
 
-        asm("mrs %[value], tcr_el1_el1"
-            : [value] "=r"(tcr_el1));
+        asm volatile("mrs %[value], tcr_el1"
+                     : [value] "=r"(tcr_el1));
 
         return tcr_el1;
     }
@@ -672,15 +672,15 @@ struct alignas(u64) SCTLR_EL1 {
 
     static inline void write(SCTLR_EL1 sctlr_el1)
     {
-        asm("msr sctlr_el1, %[value]" ::[value] "r"(sctlr_el1));
+        asm volatile("msr sctlr_el1, %[value]" ::[value] "r"(sctlr_el1));
     }
 
     static inline SCTLR_EL1 read()
     {
         SCTLR_EL1 sctlr;
 
-        asm("mrs %[value], sctlr_el1"
-            : [value] "=r"(sctlr));
+        asm volatile("mrs %[value], sctlr_el1"
+                     : [value] "=r"(sctlr));
 
         return sctlr;
     }
@@ -711,8 +711,8 @@ struct alignas(u64) MIDR_EL1 {
     {
         MIDR_EL1 main_id_register;
 
-        asm("mrs %[value], MIDR_EL1"
-            : [value] "=r"(main_id_register));
+        asm volatile("mrs %[value], MIDR_EL1"
+                     : [value] "=r"(main_id_register));
 
         return main_id_register;
     }
@@ -728,8 +728,8 @@ struct alignas(u64) AIDR_EL1 {
     {
         AIDR_EL1 auxiliary_id_register;
 
-        asm("mrs %[value], AIDR_EL1"
-            : [value] "=r"(auxiliary_id_register));
+        asm volatile("mrs %[value], AIDR_EL1"
+                     : [value] "=r"(auxiliary_id_register));
 
         return auxiliary_id_register;
     }
@@ -786,15 +786,15 @@ struct alignas(u64) HCR_EL2 {
 
     static inline void write(HCR_EL2 hcr_el2)
    {
-        asm("msr hcr_el2, %[value]" ::[value] "r"(hcr_el2));
+        asm volatile("msr hcr_el2, %[value]" ::[value] "r"(hcr_el2));
     }
 
     static inline HCR_EL2 read()
     {
         HCR_EL2 spsr;
 
-        asm("mrs %[value], hcr_el2"
-            : [value] "=r"(spsr));
+        asm volatile("mrs %[value], hcr_el2"
+                     : [value] "=r"(spsr));
 
         return spsr;
     }
@@ -842,15 +842,15 @@ struct alignas(u64) SCR_EL3 {
 
     static inline void write(SCR_EL3 scr_el3)
     {
-        asm("msr scr_el3, %[value]" ::[value] "r"(scr_el3));
+        asm volatile("msr scr_el3, %[value]" ::[value] "r"(scr_el3));
     }
 
     static inline SCR_EL3 read()
     {
         SCR_EL3 scr;
 
-        asm("mrs %[value], scr_el3"
-            : [value] "=r"(scr));
+        asm volatile("mrs %[value], scr_el3"
+                     : [value] "=r"(scr));
 
         return scr;
     }
@@ -890,15 +890,15 @@ struct alignas(u64) SPSR_EL1 {
 
     static inline void write(SPSR_EL1 spsr_el1)
     {
-        asm("msr spsr_el1, %[value]" ::[value] "r"(spsr_el1));
+        asm volatile("msr spsr_el1, %[value]" ::[value] "r"(spsr_el1));
     }
 
     static inline SPSR_EL1 read()
     {
         SPSR_EL1 spsr;
 
-        asm("mrs %[value], spsr_el1"
-            : [value] "=r"(spsr));
+        asm volatile("mrs %[value], spsr_el1"
+                     : [value] "=r"(spsr));
 
         return spsr;
     }
@@ -939,15 +939,15 @@ struct alignas(u64) SPSR_EL2 {
 
     static inline void write(SPSR_EL2 spsr_el2)
     {
-        asm("msr spsr_el2, %[value]" ::[value] "r"(spsr_el2));
+        asm volatile("msr spsr_el2, %[value]" ::[value] "r"(spsr_el2));
     }
 
     static inline SPSR_EL2 read()
     {
         SPSR_EL2 spsr;
 
-        asm("mrs %[value], spsr_el2"
-            : [value] "=r"(spsr));
+        asm volatile("mrs %[value], spsr_el2"
+                     : [value] "=r"(spsr));
 
         return spsr;
     }
@@ -988,15 +988,15 @@ struct alignas(u64) SPSR_EL3 {
 
     static inline void write(SPSR_EL3 spsr_el3)
     {
-        asm("msr spsr_el3, %[value]" ::[value] "r"(spsr_el3));
+        asm volatile("msr spsr_el3, %[value]" ::[value] "r"(spsr_el3));
     }
 
     static inline SPSR_EL3 read()
     {
         SPSR_EL3 spsr;
 
-        asm("mrs %[value], spsr_el3"
-            : [value] "=r"(spsr));
+        asm volatile("mrs %[value], spsr_el3"
+                     : [value] "=r"(spsr));
 
         return spsr;
     }
@@ -1011,7 +1011,7 @@ struct alignas(u64) MAIR_EL1 {
 
     static inline void write(MAIR_EL1 mair_el1)
     {
-        asm("msr mair_el1, %[value]" ::[value] "r"(mair_el1));
+        asm volatile("msr mair_el1, %[value]" ::[value] "r"(mair_el1));
     }
 };
 static_assert(sizeof(MAIR_EL1) == 8);
@@ -1029,8 +1029,8 @@ struct ESR_EL1 {
     {
         ESR_EL1 esr_el1;
 
-        asm("mrs %[value], esr_el1"
-            : [value] "=r"(esr_el1));
+        asm volatile("mrs %[value], esr_el1"
+                     : [value] "=r"(esr_el1));
 
         return esr_el1;
     }
@@ -1045,8 +1045,8 @@ struct FAR_EL1 {
     {
         FAR_EL1 far_el1;
 
-        asm("mrs %[value], far_el1"
-            : [value] "=r"(far_el1));
+        asm volatile("mrs %[value], far_el1"
+                     : [value] "=r"(far_el1));
 
         return far_el1;
    }
@@ -1359,7 +1359,7 @@ struct alignas(u64) CPACR_EL1 {
 
     static inline void write(CPACR_EL1 cpacr_el1)
    {
-        asm("msr cpacr_el1, %[value]" ::[value] "r"(cpacr_el1));
+        asm volatile("msr cpacr_el1, %[value]" ::[value] "r"(cpacr_el1));
     }
 };
 static_assert(sizeof(CPACR_EL1) == 8);