summaryrefslogtreecommitdiff
path: root/Kernel/Arch/aarch64/Prekernel
diff options
context:
space:
mode:
Diffstat (limited to 'Kernel/Arch/aarch64/Prekernel')
-rw-r--r--Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.S48
-rw-r--r--Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.h14
-rw-r--r--Kernel/Arch/aarch64/Prekernel/Prekernel.h18
-rw-r--r--Kernel/Arch/aarch64/Prekernel/PrekernelCommon.cpp32
-rw-r--r--Kernel/Arch/aarch64/Prekernel/PrekernelExceptions.cpp101
-rw-r--r--Kernel/Arch/aarch64/Prekernel/PrekernelMMU.cpp198
-rw-r--r--Kernel/Arch/aarch64/Prekernel/boot.S33
7 files changed, 444 insertions, 0 deletions
diff --git a/Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.S b/Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.S
new file mode 100644
index 0000000000..33f2d4de7f
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021, Nico Weber <thakis@chromium.org>
+ * Copyright (c) 2021, Marcin Undak <mcinek@gmail.com>
+ * Copyright (c) 2021, Jesse Buhagiar <jooster669@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+.global wait_cycles
+.type wait_cycles, @function
+wait_cycles:
+Lstart:
+ // This is probably too fast when caching and branch prediction are turned on.
+ // FIXME: Make timer-based.
+ subs x0, x0, #1
+ bne Lstart
+ ret
+
+.global enter_el2_from_el3
+.type enter_el2_from_el3, @function
+enter_el2_from_el3:
+ adr x0, entered_el2
+ msr elr_el3, x0
+ eret
+entered_el2:
+ ret
+
+.global enter_el1_from_el2
+.type enter_el1_from_el2, @function
+enter_el1_from_el2:
+ adr x0, entered_el1
+ msr elr_el2, x0
+ eret
+entered_el1:
+ ret
+
+//
+// Installs the EL1 vector table
+// Args:
+// x0 - Address of vector table
+//
+// This function doesn't return a value
+//
+.global el1_vector_table_install
+.type el1_vector_table_install, @function
+el1_vector_table_install:
+ msr VBAR_EL1, x0
+ ret
diff --git a/Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.h b/Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.h
new file mode 100644
index 0000000000..5536904b2b
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/Aarch64_asm_utils.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021, Marcin Undak <mcinek@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+extern "C" void wait_cycles(int n);
+extern "C" void el1_vector_table_install(void* vector_table);
+
+// CPU initialization functions — NOTE(review): these names do not match the symbols defined in Aarch64_asm_utils.S (enter_el2_from_el3 / enter_el1_from_el2); verify these declarations are not stale
+extern "C" [[noreturn]] void return_from_el2();
+extern "C" [[noreturn]] void return_from_el3();
diff --git a/Kernel/Arch/aarch64/Prekernel/Prekernel.h b/Kernel/Arch/aarch64/Prekernel/Prekernel.h
new file mode 100644
index 0000000000..1012ad4e3b
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/Prekernel.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021, James Mintram <me@jamesrm.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+namespace Prekernel {
+
+void drop_to_exception_level_1();
+void init_prekernel_page_tables();
+
+[[noreturn]] void panic(char const* msg);
+
+[[noreturn]] void halt();
+
+}
diff --git a/Kernel/Arch/aarch64/Prekernel/PrekernelCommon.cpp b/Kernel/Arch/aarch64/Prekernel/PrekernelCommon.cpp
new file mode 100644
index 0000000000..5f7310daa3
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/PrekernelCommon.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021, James Mintram <me@jamesrm.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/aarch64/Prekernel.h>
+
+#include <Kernel/Arch/aarch64/ASM_wrapper.h>
+#include <Kernel/Arch/aarch64/UART.h>
+
+namespace Prekernel {
+
+[[noreturn]] void panic(char const* msg)
+{
+ auto& uart = Prekernel::UART::the();
+
+ if (msg) {
+ uart.print_str(msg);
+ }
+
+ Prekernel::halt();
+}
+
+[[noreturn]] void halt()
+{
+ for (;;) {
+ asm volatile("wfi");
+ }
+}
+
+}
diff --git a/Kernel/Arch/aarch64/Prekernel/PrekernelExceptions.cpp b/Kernel/Arch/aarch64/Prekernel/PrekernelExceptions.cpp
new file mode 100644
index 0000000000..9feb7fb821
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/PrekernelExceptions.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2021, James Mintram <me@jamesrm.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/aarch64/ASM_wrapper.h>
+#include <Kernel/Arch/aarch64/Aarch64_asm_utils.h>
+#include <Kernel/Arch/aarch64/Prekernel.h>
+#include <Kernel/Arch/aarch64/Registers.h>
+
+extern "C" void enter_el2_from_el3();
+extern "C" void enter_el1_from_el2();
+
+using namespace Kernel;
+
+namespace Prekernel {
+
+static void drop_to_el2()
+{
+ Aarch64::SCR_EL3 secure_configuration_register_el3 = {};
+
+ secure_configuration_register_el3.ST = 1; // Don't trap access to Counter-timer Physical Secure registers
+ secure_configuration_register_el3.RW = 1; // Lower level to use Aarch64
+ secure_configuration_register_el3.NS = 1; // Non-secure state
+ secure_configuration_register_el3.HCE = 1; // Enable Hypervisor instructions at all levels
+
+ Aarch64::SCR_EL3::write(secure_configuration_register_el3);
+
+ Aarch64::SPSR_EL3 saved_program_status_register_el3 = {};
+
+ // Mask (disable) all interrupts
+ saved_program_status_register_el3.A = 1;
+ saved_program_status_register_el3.I = 1;
+ saved_program_status_register_el3.F = 1;
+ saved_program_status_register_el3.D = 1;
+
+ // Indicate EL2 as exception origin mode (so eret drops us to EL2)
+ saved_program_status_register_el3.M = Aarch64::SPSR_EL3::Mode::EL2t;
+
+ // Set the register
+ Aarch64::SPSR_EL3::write(saved_program_status_register_el3);
+
+ // The eret lands on entered_el2, so this drops the CPU to EL2 and returns to our caller
+ enter_el2_from_el3();
+}
+static void drop_to_el1()
+{
+ Aarch64::HCR_EL2 hypervisor_configuration_register_el2 = {};
+ hypervisor_configuration_register_el2.RW = 1; // EL1 to use 64-bit mode
+ Aarch64::HCR_EL2::write(hypervisor_configuration_register_el2);
+
+ Aarch64::SPSR_EL2 saved_program_status_register_el2 = {};
+
+ // Mask (disable) all interrupts
+ saved_program_status_register_el2.A = 1;
+ saved_program_status_register_el2.I = 1;
+ saved_program_status_register_el2.F = 1;
+
+ // Indicate EL1 as exception origin mode (so we go back there)
+ saved_program_status_register_el2.M = Aarch64::SPSR_EL2::Mode::EL1t;
+
+ Aarch64::SPSR_EL2::write(saved_program_status_register_el2);
+ enter_el1_from_el2();
+}
+
+static void set_up_el1()
+{
+ Aarch64::SCTLR_EL1 system_control_register_el1 = Aarch64::SCTLR_EL1::reset_value();
+
+ system_control_register_el1.UCT = 1; // Don't trap access to CTR_EL0
+ system_control_register_el1.nTWE = 1; // Don't trap WFE instructions
+ system_control_register_el1.nTWI = 1; // Don't trap WFI instructions
+ system_control_register_el1.DZE = 1; // Don't trap DC ZVA instructions
+ system_control_register_el1.UMA = 1; // Don't trap access to DAIF (debugging) flags of EFLAGS register
+ system_control_register_el1.SA0 = 1; // Enable stack access alignment check for EL0
+ system_control_register_el1.SA = 1; // Enable stack access alignment check for EL1
+ system_control_register_el1.A = 1; // Enable memory access alignment check
+
+ Aarch64::SCTLR_EL1::write(system_control_register_el1);
+}
+
+void drop_to_exception_level_1()
+{
+ switch (Kernel::Aarch64::Asm::get_current_exception_level()) {
+ case Kernel::Aarch64::Asm::ExceptionLevel::EL3:
+ drop_to_el2();
+ [[fallthrough]];
+ case Kernel::Aarch64::Asm::ExceptionLevel::EL2:
+ drop_to_el1();
+ [[fallthrough]];
+ case Kernel::Aarch64::Asm::ExceptionLevel::EL1:
+ set_up_el1();
+ break;
+ default: {
+ Prekernel::panic("FATAL: CPU booted in unsupported exception mode!\r\n");
+ }
+ }
+}
+
+}
diff --git a/Kernel/Arch/aarch64/Prekernel/PrekernelMMU.cpp b/Kernel/Arch/aarch64/Prekernel/PrekernelMMU.cpp
new file mode 100644
index 0000000000..31ec573299
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/PrekernelMMU.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2021, James Mintram <me@jamesrm.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Types.h>
+
+#include <Kernel/Arch/aarch64/Prekernel.h>
+
+#include <Kernel/Arch/aarch64/ASM_wrapper.h>
+#include <Kernel/Arch/aarch64/MMIO.h>
+#include <Kernel/Arch/aarch64/Registers.h>
+#include <Kernel/Arch/aarch64/UART.h>
+
+// Documentation here for Aarch64 Address Translations
+// https://documentation-service.arm.com/static/5efa1d23dbdee951c1ccdec5?token=
+
+using namespace Kernel;
+
+// These come from the linker script
+extern u8 page_tables_phys_start[];
+extern u8 page_tables_phys_end[];
+
+namespace Prekernel {
+
+// physical memory
+constexpr u32 START_OF_NORMAL_MEMORY = 0x00000000;
+constexpr u32 END_OF_NORMAL_MEMORY = 0x3EFFFFFF;
+
+// 4KiB page size was chosen for the prekernel to make this code slightly simpler
+constexpr u32 GRANULE_SIZE = 0x1000;
+constexpr u32 PAGE_TABLE_SIZE = 0x1000;
+
+// Documentation for translation table format
+// https://developer.arm.com/documentation/101811/0101/Controlling-address-translation
+constexpr u32 PAGE_DESCRIPTOR = 0b11;
+constexpr u32 TABLE_DESCRIPTOR = 0b11;
+constexpr u32 DESCRIPTOR_MASK = ~0b11;
+
+constexpr u32 ACCESS_FLAG = 1 << 10;
+
+// shareability
+constexpr u32 OUTER_SHAREABLE = (2 << 8);
+constexpr u32 INNER_SHAREABLE = (3 << 8);
+
+// these index into the MAIR attribute table
+constexpr u32 NORMAL_MEMORY = (0 << 2);
+constexpr u32 DEVICE_MEMORY = (1 << 2);
+
+ALWAYS_INLINE static u64* descriptor_to_pointer(FlatPtr descriptor)
+{
+ return (u64*)(descriptor & DESCRIPTOR_MASK);
+}
+
+namespace {
+class PageBumpAllocator {
+public:
+ PageBumpAllocator(u64* start, u64* end)
+ : m_start(start)
+ , m_end(end)
+ , m_current(start)
+ {
+ if (m_start >= m_end) {
+ Prekernel::panic("Invalid memory range passed to PageBumpAllocator");
+ }
+ if ((FlatPtr)m_start % PAGE_TABLE_SIZE != 0 || (FlatPtr)m_end % PAGE_TABLE_SIZE != 0) {
+ Prekernel::panic("Memory range passed into PageBumpAllocator not aligned to PAGE_TABLE_SIZE");
+ }
+ }
+
+ u64* take_page()
+ {
+ if (m_current == m_end) {
+ Prekernel::panic("Prekernel pagetable memory exhausted");
+ }
+
+ u64* page = m_current;
+ m_current += (PAGE_TABLE_SIZE / sizeof(FlatPtr));
+
+ zero_page(page);
+ return page;
+ }
+
+private:
+ void zero_page(u64* page)
+ {
+ // Memset all page table memory to zero
+ for (u64* p = page; p < page + (PAGE_TABLE_SIZE / sizeof(u64)); p++) {
+ *p = 0;
+ }
+ }
+
+ u64 const* m_start;
+ u64 const* m_end;
+ u64* m_current;
+};
+}
+
+static void insert_identity_entries_for_physical_memory_range(PageBumpAllocator& allocator, u64* page_table, FlatPtr start, FlatPtr end, u64 flags)
+{
+ // Not very efficient, but simple and it works.
+ for (FlatPtr addr = start; addr < end; addr += GRANULE_SIZE) {
+ // Each level has 9 bits (512 entries)
+ u64 level0_idx = (addr >> 39) & 0x1FF;
+ u64 level1_idx = (addr >> 30) & 0x1FF;
+ u64 level2_idx = (addr >> 21) & 0x1FF;
+ u64 level3_idx = (addr >> 12) & 0x1FF;
+
+ u64* level1_table = page_table;
+
+ if (level1_table[level0_idx] == 0) {
+ level1_table[level0_idx] = (FlatPtr)allocator.take_page();
+ level1_table[level0_idx] |= TABLE_DESCRIPTOR;
+ }
+
+ u64* level2_table = descriptor_to_pointer(level1_table[level0_idx]);
+
+ if (level2_table[level1_idx] == 0) {
+ level2_table[level1_idx] = (FlatPtr)allocator.take_page();
+ level2_table[level1_idx] |= TABLE_DESCRIPTOR;
+ }
+
+ u64* level3_table = descriptor_to_pointer(level2_table[level1_idx]);
+
+ if (level3_table[level2_idx] == 0) {
+ level3_table[level2_idx] = (FlatPtr)allocator.take_page();
+ level3_table[level2_idx] |= TABLE_DESCRIPTOR;
+ }
+
+ u64* level4_table = descriptor_to_pointer(level3_table[level2_idx]);
+ u64* l4_entry = &level4_table[level3_idx];
+ *l4_entry = addr;
+ *l4_entry |= flags;
+ }
+}
+
+static void build_identity_map(PageBumpAllocator& allocator)
+{
+ u64* level1_table = allocator.take_page();
+
+ u64 normal_memory_flags = ACCESS_FLAG | PAGE_DESCRIPTOR | INNER_SHAREABLE | NORMAL_MEMORY;
+ u64 device_memory_flags = ACCESS_FLAG | PAGE_DESCRIPTOR | OUTER_SHAREABLE | DEVICE_MEMORY;
+
+ insert_identity_entries_for_physical_memory_range(allocator, level1_table, START_OF_NORMAL_MEMORY, END_OF_NORMAL_MEMORY, normal_memory_flags);
+ insert_identity_entries_for_physical_memory_range(allocator, level1_table, MMIO::the().peripheral_base_address(), MMIO::the().peripheral_end_address(), device_memory_flags);
+}
+
+static void switch_to_page_table(u8* page_table)
+{
+ Aarch64::Asm::set_ttbr0_el1((FlatPtr)page_table);
+ Aarch64::Asm::set_ttbr1_el1((FlatPtr)page_table);
+}
+
+static void activate_mmu()
+{
+ Aarch64::MAIR_EL1 mair_el1 = {};
+ mair_el1.Attr[0] = 0xFF; // Normal memory
+ mair_el1.Attr[1] = 0b00000100; // Device-nGnRE memory (non-cacheable)
+ Aarch64::MAIR_EL1::write(mair_el1);
+
+ // Configure cacheability attributes for memory associated with translation table walks
+ Aarch64::TCR_EL1 tcr_el1 = {};
+
+ tcr_el1.SH1 = Aarch64::TCR_EL1::InnerShareable;
+ tcr_el1.ORGN1 = Aarch64::TCR_EL1::NormalMemory_Outer_WriteBack_ReadAllocate_WriteAllocateCacheable;
+ tcr_el1.IRGN1 = Aarch64::TCR_EL1::NormalMemory_Inner_WriteBack_ReadAllocate_WriteAllocateCacheable;
+
+ tcr_el1.SH0 = Aarch64::TCR_EL1::InnerShareable;
+ tcr_el1.ORGN0 = Aarch64::TCR_EL1::NormalMemory_Outer_WriteBack_ReadAllocate_WriteAllocateCacheable;
+ tcr_el1.IRGN0 = Aarch64::TCR_EL1::NormalMemory_Inner_WriteBack_ReadAllocate_WriteAllocateCacheable;
+
+ tcr_el1.TG1 = Aarch64::TCR_EL1::TG1GranuleSize::Size_4KB;
+ tcr_el1.TG0 = Aarch64::TCR_EL1::TG0GranuleSize::Size_4KB;
+
+ // Auto detect the Intermediate Physical Address Size
+ Aarch64::ID_AA64MMFR0_EL1 feature_register = Aarch64::ID_AA64MMFR0_EL1::read();
+ tcr_el1.IPS = feature_register.PARange;
+
+ Aarch64::TCR_EL1::write(tcr_el1);
+
+ // Enable MMU in the system control register
+ Aarch64::SCTLR_EL1 sctlr_el1 = Aarch64::SCTLR_EL1::read();
+ sctlr_el1.M = 1; // Enable MMU
+ Aarch64::SCTLR_EL1::write(sctlr_el1);
+
+ Aarch64::Asm::flush();
+}
+
+void init_prekernel_page_tables()
+{
+ PageBumpAllocator allocator((u64*)page_tables_phys_start, (u64*)page_tables_phys_end);
+ build_identity_map(allocator);
+ switch_to_page_table(page_tables_phys_start);
+ activate_mmu();
+}
+
+}
diff --git a/Kernel/Arch/aarch64/Prekernel/boot.S b/Kernel/Arch/aarch64/Prekernel/boot.S
new file mode 100644
index 0000000000..4c44b5288a
--- /dev/null
+++ b/Kernel/Arch/aarch64/Prekernel/boot.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021, Nico Weber <thakis@chromium.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+// In a specially-named text section so that the linker script can put it first in .text.
+.section ".text.first"
+
+.global start
+.type start, @function
+start:
+ // Let only core 0 continue, put other cores to sleep.
+ mrs x13, MPIDR_EL1
+ and x13, x13, 0xff
+ cbnz x13, _ZN9Prekernel4haltEv
+
+ // Let stack start before .text for now.
+ // 512 kiB (0x80000) of stack are probably not sufficient, especially once we give the other cores some stack too,
+ // but for now it's ok.
+ msr SPSel, #0 // Use the same SP as we descend into EL1
+ ldr x14, =start
+ mov sp, x14
+
+ // Clear BSS.
+ ldr x14, =start_of_bss
+ ldr x15, =size_of_bss_divided_by_8
+Lbss_clear_loop:
+ str xzr, [x14], #8
+ subs x15, x15, #1
+ bne Lbss_clear_loop
+
+ b init