summaryrefslogtreecommitdiff
path: root/Kernel
diff options
context:
space:
mode:
authorJames Mintram <me@jamesrm.com>2021-11-21 00:56:05 +0000
committerBrian Gianforcaro <b.gianfo@gmail.com>2021-11-28 22:01:21 -0800
commit4e9777243ee8ff29141c01e73a88a5b5cd6010e3 (patch)
treeede5a73dd7bc845f9867e67f4d8f8fa4be3f793d /Kernel
parent4a4a3193f84b53673b63d6ad520d98108c0710a6 (diff)
downloadserenity-4e9777243ee8ff29141c01e73a88a5b5cd6010e3.zip
Kernel: Refactor prekernel MMU to use a bump allocator
Diffstat (limited to 'Kernel')
-rw-r--r--Kernel/Prekernel/Arch/aarch64/PrekernelMMU.cpp | 85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/Kernel/Prekernel/Arch/aarch64/PrekernelMMU.cpp b/Kernel/Prekernel/Arch/aarch64/PrekernelMMU.cpp
index 3ef8d6170a..458dea7a6a 100644
--- a/Kernel/Prekernel/Arch/aarch64/PrekernelMMU.cpp
+++ b/Kernel/Prekernel/Arch/aarch64/PrekernelMMU.cpp
@@ -37,6 +37,7 @@ constexpr u32 PAGE_TABLE_SIZE = 0x1000;
// https://developer.arm.com/documentation/101811/0101/Controlling-address-translation
constexpr u32 PAGE_DESCRIPTOR = 0b11;
constexpr u32 TABLE_DESCRIPTOR = 0b11;
+constexpr u32 DESCRIPTOR_MASK = ~0b11;
constexpr u32 ACCESS_FLAG = 1 << 10;
@@ -48,44 +49,85 @@ constexpr u32 INNER_SHAREABLE = (3 << 8);
constexpr u32 NORMAL_MEMORY = (0 << 2);
constexpr u32 DEVICE_MEMORY = (1 << 2);
+constexpr u64* descriptor_to_pointer(u64 descriptor)
+{
+ return (u64*)(descriptor & DESCRIPTOR_MASK);
+}
+
using page_table_t = u8*;
-static void zero_identity_map(page_table_t page_table_start, page_table_t page_table_end)
+static void zero_pages(u64* start, u64* end)
{
// Memset all page table memory to zero
- for (uint64_t* p = (uint64_t*)page_table_start;
- p < (uint64_t*)page_table_end;
+ for (u64* p = (u64*)start;
+ p < (u64*)end;
p++) {
*p = 0;
}
}
-static void build_identity_map(page_table_t page_table)
+namespace {
+class PageBumpAllocator {
+public:
+ PageBumpAllocator(u64* start, u64* end)
+ : m_start(start)
+ , m_end(end)
+ , m_current(start)
+ {
+ if (m_start >= m_end) {
+ Prekernel::panic("Invalid memory range passed to PageBumpAllocator");
+ }
+ if ((u64)m_start % PAGE_TABLE_SIZE != 0 || (u64)m_end % PAGE_TABLE_SIZE != 0) {
+ Prekernel::panic("Memory range passed into PageBumpAllocator not aligned to PAGE_TABLE_SIZE");
+ }
+ }
+
+ u64* take_page()
+ {
+ if (m_current == m_end) {
+ Prekernel::panic("Prekernel pagetable memory exhausted");
+ }
+
+ u64* page = m_current;
+ m_current += (PAGE_TABLE_SIZE / sizeof(u64));
+
+ zero_pages(page, page + (PAGE_TABLE_SIZE / sizeof(u64)));
+ return page;
+ }
+
+private:
+ const u64* m_start;
+ const u64* m_end;
+ u64* m_current;
+};
+}
+
+static void build_identity_map(PageBumpAllocator& allocator)
{
- // Set up first entry of level 1
- uint64_t* level1_entry = (uint64_t*)page_table;
- *level1_entry = (uint64_t)&page_table[PAGE_TABLE_SIZE];
- *level1_entry |= TABLE_DESCRIPTOR;
+ u64* level1_table = allocator.take_page();
- // Set up first entry of level 2
- uint64_t* level2_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE];
- *level2_entry = (uint64_t)&page_table[PAGE_TABLE_SIZE * 2];
- *level2_entry |= TABLE_DESCRIPTOR;
+ level1_table[0] = (u64)allocator.take_page();
+ level1_table[0] |= TABLE_DESCRIPTOR;
- // Set up L3 entries
- for (uint32_t l3_idx = 0; l3_idx < 512; l3_idx++) {
- uint64_t* l3_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE * 2 + (l3_idx * sizeof(uint64_t))];
+ u64* level2_table = descriptor_to_pointer(level1_table[0]);
+ level2_table[0] = (u64)allocator.take_page();
+ level2_table[0] |= TABLE_DESCRIPTOR;
+
+ u64* level3_table = descriptor_to_pointer(level2_table[0]);
- *l3_entry = (uint64_t)(page_table + (PAGE_TABLE_SIZE * 3) + (l3_idx * PAGE_TABLE_SIZE));
- *l3_entry |= TABLE_DESCRIPTOR;
+ // Set up L3 entries
+ for (uint32_t l3_idx = 0; l3_idx < 512; l3_idx++) {
+ level3_table[l3_idx] = (u64)allocator.take_page();
+ level3_table[l3_idx] |= TABLE_DESCRIPTOR;
}
// Set up L4 entries
size_t page_index = 0;
for (size_t addr = START_OF_NORMAL_MEMORY; addr < END_OF_NORMAL_MEMORY; addr += GRANULE_SIZE, page_index++) {
- uint64_t* l4_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE * 3 + (page_index * sizeof(uint64_t))];
+ u64* level4_table = descriptor_to_pointer(level3_table[page_index / 512]);
+ u64* l4_entry = &level4_table[page_index % 512];
*l4_entry = addr;
*l4_entry |= ACCESS_FLAG;
@@ -96,7 +138,8 @@ static void build_identity_map(page_table_t page_table)
// Set up entries for last 16MB of memory (MMIO)
for (size_t addr = START_OF_DEVICE_MEMORY; addr < END_OF_DEVICE_MEMORY; addr += GRANULE_SIZE, page_index++) {
- uint64_t* l4_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE * 3 + (page_index * sizeof(uint64_t))];
+ u64* level4_table = descriptor_to_pointer(level3_table[page_index / 512]);
+ u64* l4_entry = &level4_table[page_index % 512];
*l4_entry = addr;
*l4_entry |= ACCESS_FLAG;
@@ -149,8 +192,8 @@ static void activate_mmu()
void init_prekernel_page_tables()
{
- zero_identity_map(page_tables_phys_start, page_tables_phys_end);
- build_identity_map(page_tables_phys_start);
+ PageBumpAllocator allocator((u64*)page_tables_phys_start, (u64*)page_tables_phys_end);
+ build_identity_map(allocator);
switch_to_page_table(page_tables_phys_start);
activate_mmu();
}