path: root/Kernel/Arch/aarch64/Exceptions.cpp
/*
 * Copyright (c) 2021, James Mintram <me@jamesrm.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/aarch64/ASM_wrapper.h>
#include <Kernel/Arch/aarch64/CPU.h>
#include <Kernel/Arch/aarch64/Processor.h>
#include <Kernel/Arch/aarch64/Registers.h>
#include <Kernel/Panic.h>

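// The EL1 exception vector table; defined outside this file (presumably in assembly).
// setup_el1() below hands its address to load_el1_vector_table(), which is expected to
// install it in VBAR_EL1; the architecture requires the table to be 2 KiB aligned,
// since VBAR_EL1[10:0] are reserved.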
extern "C" uintptr_t vector_table_el1;

namespace Kernel {

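// An AArch64 core can only move to a lower exception level by performing an exception
// return. Each drop_*() helper below therefore programs the relevant control register
// and SPSR, then lets the assembly wrapper (presumably an `eret`) perform the actual
// exception return.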
static void drop_el3_to_el2()
{
    Aarch64::SCR_EL3 secure_configuration_register_el3 = {};

    secure_configuration_register_el3.ST = 1;  // Don't trap access to the Counter-timer Physical Secure registers
    secure_configuration_register_el3.RW = 1;  // The next lower exception level (EL2) runs in AArch64 state
    secure_configuration_register_el3.NS = 1;  // EL0 and EL1 are in the Non-secure state
    secure_configuration_register_el3.HCE = 1; // Enable the HVC (hypervisor call) instruction

    Aarch64::SCR_EL3::write(secure_configuration_register_el3);

    Aarch64::SPSR_EL3 saved_program_status_register_el3 = {};

    // Mask (disable) all interrupts
    saved_program_status_register_el3.A = 1;
    saved_program_status_register_el3.I = 1;
    saved_program_status_register_el3.F = 1;
    saved_program_status_register_el3.D = 1;

    // Indicate EL2 as the exception origin mode (so the exception return drops us to EL2)
    saved_program_status_register_el3.M = Aarch64::SPSR_EL3::Mode::EL2h;

    // Set the register
    Aarch64::SPSR_EL3::write(saved_program_status_register_el3);

    // Perform the exception return into EL2; execution continues after this call, now running in EL2
    Aarch64::Asm::enter_el2_from_el3();
}

static void drop_el2_to_el1()
{
    Aarch64::HCR_EL2 hypervisor_configuration_register_el2 = {};
    hypervisor_configuration_register_el2.RW = 1; // EL1 runs in AArch64 state
    Aarch64::HCR_EL2::write(hypervisor_configuration_register_el2);

    Aarch64::SPSR_EL2 saved_program_status_register_el2 = {};

    // Mask (disable) all interrupts
    saved_program_status_register_el2.A = 1;
    saved_program_status_register_el2.I = 1;
    saved_program_status_register_el2.F = 1;

    // Indicate EL1 as the exception origin mode (so the exception return drops us to EL1)
    saved_program_status_register_el2.M = Aarch64::SPSR_EL2::Mode::EL1h;

    Aarch64::SPSR_EL2::write(saved_program_status_register_el2);
    Aarch64::Asm::enter_el1_from_el2();
}

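// Configure EL1 itself: relax the traps we don't want, enable alignment checking,
// allow FP/SIMD use, and install the exception vector table.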
static void setup_el1()
{
    Aarch64::SCTLR_EL1 system_control_register_el1 = Aarch64::SCTLR_EL1::reset_value();

    system_control_register_el1.UCT = 1;  // Don't trap access to CTR_EL0
    system_control_register_el1.nTWE = 1; // Don't trap WFE instructions
    system_control_register_el1.nTWI = 1; // Don't trap WFI instructions
    system_control_register_el1.DZE = 1;  // Don't trap DC ZVA instructions
    system_control_register_el1.UMA = 1;  // Don't trap EL0 access to the DAIF (interrupt mask) bits of PSTATE
    system_control_register_el1.SA0 = 1;  // Enable stack access alignment check for EL0
    system_control_register_el1.SA = 1;   // Enable stack access alignment check for EL1
    system_control_register_el1.A = 1;    // Enable memory access alignment check

    Aarch64::SCTLR_EL1::write(system_control_register_el1);

    Aarch64::CPACR_EL1 cpacr_el1 = {};
    cpacr_el1.ZEN = 0;     // Trap SVE instructions at EL1 and EL0
    cpacr_el1.FPEN = 0b11; // Don't trap Advanced SIMD and floating-point instructions
    cpacr_el1.SMEN = 0;    // Trap SME instructions at EL1 and EL0
    cpacr_el1.TTA = 0;     // Don't trap access to trace registers
    Aarch64::CPACR_EL1::write(cpacr_el1);

    Aarch64::Asm::load_el1_vector_table(&vector_table_el1);
}

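// Brings the CPU from whatever exception level the boot environment handed us (EL1, EL2
// or EL3) down to EL1, then configures EL1. This runs early during boot, before the MMU
// is enabled, hence the *_without_mmu logging and panic helpers below.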
void initialize_exceptions()
{
    auto base_exception_level = Aarch64::Asm::get_current_exception_level();

    if (base_exception_level > Aarch64::Asm::ExceptionLevel::EL3) {
        panic_without_mmu("Started in unknown EL (Greater than EL3)"sv);
    } else if (base_exception_level < Aarch64::Asm::ExceptionLevel::EL1) {
        panic_without_mmu("Started in unsupported EL (Less than EL1)"sv);
    } else {
        if (base_exception_level == Aarch64::Asm::ExceptionLevel::EL1)
            dbgln_without_mmu("Started in EL1"sv);
        else if (base_exception_level == Aarch64::Asm::ExceptionLevel::EL2)
            dbgln_without_mmu("Started in EL2"sv);
        else if (base_exception_level == Aarch64::Asm::ExceptionLevel::EL3)
            dbgln_without_mmu("Started in EL3"sv);
    }

    if (base_exception_level > Aarch64::Asm::ExceptionLevel::EL2) {
        drop_el3_to_el2();
        dbgln_without_mmu("Dropped to EL2"sv);
    }

    if (base_exception_level > Aarch64::Asm::ExceptionLevel::EL1) {
        drop_el2_to_el1();
        dbgln_without_mmu("Dropped to EL1"sv);
    }

    setup_el1();
    dbgln_without_mmu("Set up EL1"sv);
}

// NOTE: The normal PANIC macro cannot be used early in the boot process when the MMU is disabled,
//       as it will access global variables, which will cause a crash since they aren't mapped yet.
void panic_without_mmu(StringView message)
{
    (void)message;
    // FIXME: Print out message to early boot console.
    Processor::halt();
}

void dbgln_without_mmu(StringView message)
{
    (void)message;
    // FIXME: Print out message to early boot console.
}

}