/*
 * Copyright (c) 2018-2021, Andreas Kling
 * Copyright (c) 2022, Timon Kruiper
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/AtomicRefCounted.h>
#include <AK/Badge.h>
#include <AK/IntrusiveRedBlackTree.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/PhysicalAddress.h>

namespace Kernel::Memory {

// 4KiB page size was chosen to make this code slightly simpler
constexpr u32 GRANULE_SIZE = 0x1000;
constexpr u32 PAGE_TABLE_SIZE = 0x1000;

// Documentation for translation table format
// https://developer.arm.com/documentation/101811/0101/Controlling-address-translation
constexpr u32 PAGE_DESCRIPTOR = 0b11;
constexpr u32 TABLE_DESCRIPTOR = 0b11;
constexpr u32 DESCRIPTOR_MASK = ~0b11;

constexpr u32 ACCESS_FLAG = 1 << 10;

// shareability
constexpr u32 OUTER_SHAREABLE = (2 << 8);
constexpr u32 INNER_SHAREABLE = (3 << 8);

// these index into the MAIR attribute table
constexpr u32 NORMAL_MEMORY = (0 << 2);
constexpr u32 DEVICE_MEMORY = (1 << 2);

constexpr u32 ACCESS_PERMISSION_EL0 = (1 << 6);
constexpr u32 ACCESS_PERMISSION_READONLY = (1 << 7);

// Figure D5-15 of Arm Architecture Reference Manual Armv8 - page D5-2588
class PageDirectoryEntry {
public:
    PhysicalPtr page_table_base() const { return PhysicalAddress::physical_page_base(m_raw); }
    void set_page_table_base(PhysicalPtr value)
    {
        m_raw &= 0xffff000000000fffULL;
        m_raw |= PhysicalAddress::physical_page_base(value);

        // FIXME: Do not hardcode this.
        m_raw |= TABLE_DESCRIPTOR;
    }

    bool is_null() const { return m_raw == 0; }
    void clear() { m_raw = 0; }

    u64 raw() const { return m_raw; }
    void copy_from(Badge<Memory::PageDirectory>, PageDirectoryEntry const& other) { m_raw = other.m_raw; }

    enum Flags {
        Present = 1 << 0,
    };

    bool is_present() const { return (raw() & Present) == Present; }
    void set_present(bool) { }

    bool is_user_allowed() const { TODO_AARCH64(); }
    void set_user_allowed(bool) { }

    bool is_huge() const { TODO_AARCH64(); }
    void set_huge(bool) { }

    bool is_writable() const { TODO_AARCH64(); }
    void set_writable(bool) { }

    bool is_write_through() const { TODO_AARCH64(); }
    void set_write_through(bool) { }

    bool is_cache_disabled() const { TODO_AARCH64(); }
    void set_cache_disabled(bool) { }

    bool is_global() const { TODO_AARCH64(); }
    void set_global(bool) { }

    bool is_execute_disabled() const { TODO_AARCH64(); }
    void set_execute_disabled(bool) { }

private:
    void set_bit(u64 bit, bool value)
    {
        if (value)
            m_raw |= bit;
        else
            m_raw &= ~bit;
    }

    u64 m_raw;
};

// Figure D5-17 VMSAv8-64 level 3 descriptor format of Arm Architecture Reference Manual Armv8 - page D5-2592
class PageTableEntry {
public:
    PhysicalPtr physical_page_base() const { return PhysicalAddress::physical_page_base(m_raw); }
    void set_physical_page_base(PhysicalPtr value)
    {
        m_raw &= 0xffff000000000fffULL;
        m_raw |= PhysicalAddress::physical_page_base(value);

        // FIXME: For now we map everything with the same permissions.
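        // The bits OR'd in below are the VMSAv8-64 attributes defined at the top of this file:
        // ACCESS_FLAG (bit 10) pre-sets the Access Flag so the first access to the page does not
        // fault, PAGE_DESCRIPTOR (0b11) marks a valid level-3 page descriptor, INNER_SHAREABLE
        // fills the shareability field (bits [9:8]), and NORMAL_MEMORY selects the MAIR attribute
        // table entry used for normal memory (AttrIndx, bits [4:2]).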
        u64 normal_memory_flags = ACCESS_FLAG | PAGE_DESCRIPTOR | INNER_SHAREABLE | NORMAL_MEMORY;
        m_raw |= normal_memory_flags;
    }

    u64 raw() const { return m_raw; }

    enum Flags {
        Present = 1 << 0,
    };

    bool is_present() const { return (raw() & Present) == Present; }
    void set_present(bool b) { set_bit(Present, b); }

    bool is_user_allowed() const { return (raw() & ACCESS_PERMISSION_EL0) == ACCESS_PERMISSION_EL0; }
    void set_user_allowed(bool b) { set_bit(ACCESS_PERMISSION_EL0, b); }

    bool is_writable() const { return !((raw() & ACCESS_PERMISSION_READONLY) == ACCESS_PERMISSION_READONLY); }
    void set_writable(bool b) { set_bit(ACCESS_PERMISSION_READONLY, !b); }

    bool is_write_through() const { TODO_AARCH64(); }
    void set_write_through(bool) { }

    bool is_cache_disabled() const { TODO_AARCH64(); }
    void set_cache_disabled(bool) { }

    bool is_global() const { TODO_AARCH64(); }
    void set_global(bool) { }

    bool is_execute_disabled() const { TODO_AARCH64(); }
    void set_execute_disabled(bool) { }

    bool is_pat() const { TODO_AARCH64(); }
    void set_pat(bool) { }

    bool is_null() const { return m_raw == 0; }
    void clear() { m_raw = 0; }

private:
    void set_bit(u64 bit, bool value)
    {
        if (value)
            m_raw |= bit;
        else
            m_raw &= ~bit;
    }

    u64 m_raw;
};

static_assert(AssertSize<PageDirectoryEntry, 8>());
static_assert(AssertSize<PageTableEntry, 8>());

class PageDirectoryPointerTable {
public:
    PageDirectoryEntry* directory(size_t index)
    {
        VERIFY(index <= (NumericLimits<size_t>::max() << 30));
        return (PageDirectoryEntry*)(PhysicalAddress::physical_page_base(raw[index]));
    }

    u64 raw[512];
};

class PageDirectory final : public AtomicRefCounted<PageDirectory> {
    friend class MemoryManager;

public:
    static ErrorOr<NonnullLockRefPtr<PageDirectory>> try_create_for_userspace();
    static NonnullLockRefPtr<PageDirectory> must_create_kernel_page_directory();
    static LockRefPtr<PageDirectory> find_current();

    ~PageDirectory();

    void allocate_kernel_directory();

    FlatPtr ttbr0() const { return m_root_table->paddr().get(); }

    bool is_root_table_initialized() const { return m_root_table; }

    AddressSpace* address_space() { return m_space; }
    AddressSpace const* address_space() const { return m_space; }

    void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }

    RecursiveSpinlock& get_lock() { return m_lock; }

    // This has to be public to let the global singleton access the member pointer
    IntrusiveRedBlackTreeNode<FlatPtr, PageDirectory, RawPtr<PageDirectory>> m_tree_node;

private:
    PageDirectory();
    static void register_page_directory(PageDirectory* directory);
    static void deregister_page_directory(PageDirectory* directory);

    AddressSpace* m_space { nullptr };
    RefPtr<PhysicalPage> m_root_table;
    RefPtr<PhysicalPage> m_directory_table;
    RefPtr<PhysicalPage> m_directory_pages[512];
    RecursiveSpinlock m_lock {};
};

void activate_kernel_page_directory(PageDirectory const& pgd);
void activate_page_directory(PageDirectory const& pgd, Thread* current_thread);

}
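// A minimal usage sketch of the PageTableEntry API above, kept as a comment because it is not part
// of the header itself. The function name `install_example_mapping` and its arguments are
// hypothetical and only illustrate how a level-3 descriptor for a writable, kernel-only page of
// normal memory could be composed:
//
//     void install_example_mapping(Kernel::Memory::PageTableEntry& pte, PhysicalPtr paddr)
//     {
//         pte.clear();
//         pte.set_physical_page_base(paddr); // also ORs in ACCESS_FLAG | PAGE_DESCRIPTOR | INNER_SHAREABLE | NORMAL_MEMORY
//         pte.set_user_allowed(false);       // keep AP[1] (ACCESS_PERMISSION_EL0) clear so EL0 cannot access the page
//         pte.set_writable(true);            // keep AP[2] (ACCESS_PERMISSION_READONLY) clear so the page stays writable
//     }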