Diffstat (limited to 'Kernel'):
 Kernel/Arch/i386/CPU.cpp    | 11
 Kernel/Arch/i386/CPU.h      |  3
 Kernel/VM/PageDirectory.cpp | 34
 3 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 9d3d5aa7e1..bd227e061a 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -916,6 +916,7 @@ u32 read_cr3()
void write_cr3(u32 cr3)
{
+    // NOTE: If you're here from a GPF crash, it's very likely that a PDPT entry is incorrect, not this!
    asm volatile("movl %%eax, %%cr3" ::"a"(cr3)
                 : "memory");
}
@@ -1033,6 +1034,15 @@ void Processor::cpu_detect()
        }
    }

+    if (max_extended_leaf >= 0x80000008) {
+        // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
+        CPUID cpuid(0x80000008);
+        m_physical_address_bit_width = cpuid.eax() & 0xff;
+    } else {
+        // For processors that do not support CPUID function 80000008H, the width is generally 36 if CPUID.01H:EDX.PAE [bit 6] = 1 and 32 otherwise.
+        m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
+    }
+
    CPUID extended_features(0x7);
    if (extended_features.ebx() & (1 << 20))
        set_feature(CPUFeature::SMAP);
@@ -1218,6 +1228,7 @@ void Processor::initialize(u32 cpu)
klog() << "CPU[" << id() << "]: Supported features: " << features_string();
if (!has_feature(CPUFeature::RDRAND))
klog() << "CPU[" << id() << "]: No RDRAND support detected, randomness will be poor";
+ klog() << "CPU[" << id() << "]: Physical address bit width: " << m_physical_address_bit_width;
if (cpu == 0)
idt_init();
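
For context on the cpu_detect() hunk above: the detection is a two-step probe. If the CPU advertises extended leaf 80000008H, EAX[7:0] of that leaf is MAXPHYADDR; otherwise the width is assumed to be 36 bits with PAE and 32 bits without, per the Intel SDM guidance quoted in the comment. A minimal standalone sketch of the same probe, using GCC/Clang's <cpuid.h> helpers rather than the kernel's CPUID wrapper class (the function name below is hypothetical):

#include <cpuid.h>
#include <cstdint>
#include <cstdio>

static uint8_t detect_physical_address_bit_width()
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;

    // CPUID.80000000H:EAX reports the highest supported extended leaf.
    if (__get_cpuid_max(0x80000000, nullptr) >= 0x80000008
        && __get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
        // CPUID.80000008H:EAX[7:0] = MAXPHYADDR.
        return eax & 0xff;
    }

    // Fallback: 36 bits if PAE is supported (CPUID.01H:EDX bit 6), otherwise 32.
    if (__get_cpuid(0x01, &eax, &ebx, &ecx, &edx) && (edx & (1 << 6)))
        return 36;
    return 32;
}

int main()
{
    // Typically prints a value between 36 and 52 on current x86 hardware;
    // this is what m_physical_address_bit_width ends up holding in the kernel.
    printf("Physical address bit width: %u\n", (unsigned)detect_physical_address_bit_width());
    return 0;
}
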
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index c5883caa51..d9a36f5c32 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -721,6 +721,7 @@ class Processor {
    static FPUState s_clean_fpu_state;
    CPUFeature m_features;
    static volatile u32 g_total_processors; // atomic
+    u8 m_physical_address_bit_width;
    ProcessorInfo* m_info;
    MemoryManagerData* m_mm_data;
@@ -812,6 +813,8 @@ public:
        return IterationDecision::Continue;
    }

+    ALWAYS_INLINE u8 physical_address_bit_width() const { return m_physical_address_bit_width; }
+
    ALWAYS_INLINE ProcessorInfo& info() { return *m_info; }
ALWAYS_INLINE static Processor& current()
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index f126ea95a5..b60a251351 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -95,10 +95,36 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
{
    auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_directory_table);
-    table.raw[0] = (u64)m_directory_pages[0]->paddr().as_ptr() | 1;
-    table.raw[1] = (u64)m_directory_pages[1]->paddr().as_ptr() | 1;
-    table.raw[2] = (u64)m_directory_pages[2]->paddr().as_ptr() | 1;
-    table.raw[3] = (u64)m_directory_pages[3]->paddr().as_ptr() | 1;
+    table.raw[0] = (FlatPtr)m_directory_pages[0]->paddr().as_ptr() | 1;
+    table.raw[1] = (FlatPtr)m_directory_pages[1]->paddr().as_ptr() | 1;
+    table.raw[2] = (FlatPtr)m_directory_pages[2]->paddr().as_ptr() | 1;
+    table.raw[3] = (FlatPtr)m_directory_pages[3]->paddr().as_ptr() | 1;
+
+    // 2 ** MAXPHYADDR - 1
+    // Where MAXPHYADDR = physical_address_bit_width
+    u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;
+
+    // bit 63 = no execute
+    // bit 7 = page size
+    // bit 5 = accessed
+    // bit 4 = cache disable
+    // bit 3 = write through
+    // bit 2 = user/supervisor
+    // bit 1 = read/write
+    // bit 0 = present
+    constexpr u64 pdpte_bit_flags = 0x80000000000000BF;
+
+    // These assertions notify us of bugs where we're:
+    // 1. Going beyond what the processor is capable of addressing.
+    // 2. Writing into the reserved bits (51:MAXPHYADDR), which throws a GPF when the
+    //    PDPT pointer is written out to CR3.
+    // We don't check the page directories' physical addresses directly because we want
+    // to catch sign extension as it ends up in the PDPTE. See issue #4584.
+    ASSERT((table.raw[0] & ~pdpte_bit_flags) <= max_physical_address);
+    ASSERT((table.raw[1] & ~pdpte_bit_flags) <= max_physical_address);
+    ASSERT((table.raw[2] & ~pdpte_bit_flags) <= max_physical_address);
+    ASSERT((table.raw[3] & ~pdpte_bit_flags) <= max_physical_address);
+
    MM.unquickmap_page();
}
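
For context on the PageDirectory.cpp hunk: each PDPT entry is masked with ~pdpte_bit_flags to strip the known flag bits, and the remaining physical-address field must not exceed 2^MAXPHYADDR - 1. Below is a small self-contained sketch of that check, with hypothetical addresses and plain C++ in place of the kernel's ASSERT, showing the sign-extension failure mode behind issue #4584:

#include <cstdint>
#include <cstdio>

static bool pdpte_fits(uint64_t pdpte, uint64_t max_physical_address)
{
    // Same mask as pdpte_bit_flags in the diff: NX (63), PS (7), A (5),
    // PCD (4), PWT (3), U/S (2), R/W (1), P (0).
    constexpr uint64_t pdpte_bit_flags = 0x80000000000000BF;
    return (pdpte & ~pdpte_bit_flags) <= max_physical_address;
}

int main()
{
    // Hypothetical MAXPHYADDR of 36, i.e. the PAE fallback from the CPU.cpp hunk.
    uint64_t max_physical_address = (1ULL << 36) - 1;

    // Well-formed PDPTE: page directory at physical 0x3000, present bit set.
    uint64_t good = 0x3000ULL | 1;

    // Failure mode referenced in the diff: a high 32-bit physical address pushed
    // through a signed intermediate gets sign-extended into bits 63:32 of the
    // PDPTE; with reserved bits set, the next write of the PDPT pointer to CR3
    // raises a general protection fault.
    uint64_t bad = (uint64_t)(int64_t)(int32_t)0x87654000u | 1;

    printf("good PDPTE: %s\n", pdpte_fits(good, max_physical_address) ? "ok" : "would trip ASSERT");
    printf("bad PDPTE:  %s\n", pdpte_fits(bad, max_physical_address) ? "ok" : "would trip ASSERT");
    return 0;
}

As the diff's comment explains, the assertions are applied to table.raw[n] itself rather than to the page directories' physical addresses so that they validate the exact 64-bit values the hardware will consume when the PDPT is loaded through write_cr3().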