author     Gunnar Beutner <gbeutner@serenityos.org>   2021-06-23 21:54:41 +0200
committer  Andreas Kling <kling@serenityos.org>       2021-06-24 09:27:13 +0200
commit     38fca26f542bcc9fa68755323ac4eadb5ecd4586 (patch)
tree       e39f3f0a04dff799a41823fdb30a8067b23aa98c /Kernel/Arch/x86
parent     f2eb759901f1fb7c96654b2461ae8fc348e250bb (diff)
Kernel: Add stubs for missing x86_64 functionality
This adds just enough stubs to make the kernel compile on x86_64. Obviously it won't do anything useful - in fact it won't even attempt to boot, because Multiboot doesn't support ELF64 binaries - but it gets those compiler errors out of the way so more progress can be made toward getting all the missing functionality in place.
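The stubbing pattern is the same in every new file added by this commit: each missing function keeps the signature its i386 counterpart has, silences unused-parameter warnings, and calls TODO() so that reaching it at runtime fails loudly instead of misbehaving silently. A condensed sketch of the pattern, mirroring Kernel/Arch/x86/x86_64/CPU.cpp below:

extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
    (void)thread; // keep the parameters so the signature matches the i386 version
    (void)flags;
    TODO();       // panic with "not yet implemented" if this path is ever reached on x86_64
}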
Diffstat (limited to 'Kernel/Arch/x86')
-rw-r--r--  Kernel/Arch/x86/Processor.h                  2
-rw-r--r--  Kernel/Arch/x86/common/CPU.cpp              42
-rw-r--r--  Kernel/Arch/x86/common/Processor.cpp        12
-rw-r--r--  Kernel/Arch/x86/common/ProcessorInfo.cpp (renamed from Kernel/Arch/x86/i386/ProcessorInfo.cpp)   0
-rw-r--r--  Kernel/Arch/x86/i386/CPU.cpp                29
-rw-r--r--  Kernel/Arch/x86/i386/InterruptEntry.cpp      1
-rw-r--r--  Kernel/Arch/x86/i386/Processor.cpp           2
-rw-r--r--  Kernel/Arch/x86/x86_64/Boot/boot.S           3
-rw-r--r--  Kernel/Arch/x86/x86_64/CPU.cpp              43
-rw-r--r--  Kernel/Arch/x86/x86_64/InterruptEntry.cpp   24
-rw-r--r--  Kernel/Arch/x86/x86_64/Processor.cpp         9
-rw-r--r--  Kernel/Arch/x86/x86_64/SafeMem.cpp          89
12 files changed, 221 insertions, 35 deletions
diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index 200c5831ab..5488556d4a 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -289,7 +289,7 @@ public:
ALWAYS_INLINE static Thread* idle_thread()
{
// See comment in Processor::current_thread
- return (Thread*)read_fs_u32(__builtin_offsetof(Processor, m_idle_thread));
+ return (Thread*)read_fs_ptr(__builtin_offsetof(Processor, m_idle_thread));
}
ALWAYS_INLINE u32 get_id() const
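The read_fs_u32 -> read_fs_ptr switch matters because m_idle_thread holds a Thread*, which is 64 bits wide on x86_64; an FS-relative 32-bit read would truncate the pointer. The read_fs_ptr implementation itself is not part of this hunk, so the following is only a sketch of what a pointer-width FS read could look like, under that assumption:

// Hypothetical sketch of a pointer-sized FS-relative read; not the actual helper.
ALWAYS_INLINE static FlatPtr read_fs_ptr(u32 offset)
{
    FlatPtr val;
    asm volatile("mov %%fs:%a[off], %[val]"
                 : [val] "=r"(val)
                 : [off] "ir"(offset));
    return val; // a 32-bit mov on i386, a 64-bit mov on x86_64
}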
diff --git a/Kernel/Arch/x86/common/CPU.cpp b/Kernel/Arch/x86/common/CPU.cpp
new file mode 100644
index 0000000000..fcfd4be5fd
--- /dev/null
+++ b/Kernel/Arch/x86/common/CPU.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Assertions.h>
+#include <AK/Types.h>
+#include <Kernel/Arch/x86/CPU.h>
+#include <Kernel/Arch/x86/Processor.h>
+#include <Kernel/Arch/x86/TrapFrame.h>
+#include <Kernel/KSyms.h>
+#include <Kernel/Process.h>
+
+void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
+{
+ asm volatile("cli");
+ critical_dmesgln("ASSERTION FAILED: {}", msg);
+ critical_dmesgln("{}:{} in {}", file, line, func);
+
+ abort();
+}
+
+[[noreturn]] void abort()
+{
+ // Switch back to the current process's page tables if there are any.
+ // Otherwise stack walking will be a disaster.
+ auto process = Process::current();
+ if (process)
+ MM.enter_process_paging_scope(*process);
+
+ Kernel::dump_backtrace();
+ Processor::halt();
+
+ abort();
+}
+
+[[noreturn]] void _abort()
+{
+ asm volatile("ud2");
+ __builtin_unreachable();
+}
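For context, __assertion_failed() is the landing point for failed kernel assertions: it disables interrupts, logs the message and location, and falls through to abort(), which switches back to the current process's page tables (so the backtrace can be walked) and halts. Below is a hypothetical illustration of how an assertion macro would route into it; the real macro lives in AK/Assertions.h and is not part of this diff:

// Illustrative only; the actual VERIFY macro in AK/Assertions.h may differ.
#define MY_VERIFY(expr)                                                          \
    do {                                                                         \
        if (!static_cast<bool>(expr))                                            \
            __assertion_failed(#expr, __FILE__, __LINE__, __PRETTY_FUNCTION__);  \
    } while (0)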
diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 96281902d7..00efca9e9f 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -485,7 +485,13 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// to be ebp.
ProcessPagingScope paging_scope(thread.process());
auto& tss = thread.tss();
- u32* stack_top = reinterpret_cast<u32*>(tss.esp);
+ u32* stack_top;
+#if ARCH(I386)
+ stack_top = reinterpret_cast<u32*>(tss.esp);
+#else
+ (void)tss;
+ TODO();
+#endif
if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
frame_ptr = 0;
@@ -494,7 +500,11 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
frame_ptr = 0;
}
+#if ARCH(I386)
eip = tss.eip;
+#else
+ TODO();
+#endif
// TODO: We need to leave the scheduler lock here, but we also
// need to prevent the target thread from being run while
// we walk the stack
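The ARCH(I386) guards exist because tss.esp and tss.eip are fields of the 32-bit TSS only; the x86_64 TSS carries no saved general-purpose register state, so those reads cannot compile there and the 64-bit path is left as TODO(). The guard pattern, reduced to a minimal illustrative example (not code from this commit):

// Illustrative only: keeping a shared source file building on both targets.
template<typename TSSType>
FlatPtr saved_instruction_pointer(TSSType const& tss)
{
#if ARCH(I386)
    return tss.eip; // the 32-bit TSS stores eip/esp directly
#else
    (void)tss;
    TODO();         // the x86_64 TSS has no eip field; this state must come from elsewhere
#endif
}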
diff --git a/Kernel/Arch/x86/i386/ProcessorInfo.cpp b/Kernel/Arch/x86/common/ProcessorInfo.cpp
index 3cb24ba64e..3cb24ba64e 100644
--- a/Kernel/Arch/x86/i386/ProcessorInfo.cpp
+++ b/Kernel/Arch/x86/common/ProcessorInfo.cpp
diff --git a/Kernel/Arch/x86/i386/CPU.cpp b/Kernel/Arch/x86/i386/CPU.cpp
index 2b464c37ac..99296b7dee 100644
--- a/Kernel/Arch/x86/i386/CPU.cpp
+++ b/Kernel/Arch/x86/i386/CPU.cpp
@@ -101,32 +101,3 @@ extern "C" u32 do_init_context(Thread* thread, u32 flags)
}
}
-
-void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
-{
- asm volatile("cli");
- critical_dmesgln("ASSERTION FAILED: {}", msg);
- critical_dmesgln("{}:{} in {}", file, line, func);
-
- abort();
-}
-
-[[noreturn]] void abort()
-{
- // Switch back to the current process's page tables if there are any.
- // Otherwise stack walking will be a disaster.
- auto process = Process::current();
- if (process)
- MM.enter_process_paging_scope(*process);
-
- Kernel::dump_backtrace();
- Processor::halt();
-
- abort();
-}
-
-[[noreturn]] void _abort()
-{
- asm volatile("ud2");
- __builtin_unreachable();
-}
diff --git a/Kernel/Arch/x86/i386/InterruptEntry.cpp b/Kernel/Arch/x86/i386/InterruptEntry.cpp
index 6a52802b7c..0df2fd6533 100644
--- a/Kernel/Arch/x86/i386/InterruptEntry.cpp
+++ b/Kernel/Arch/x86/i386/InterruptEntry.cpp
@@ -6,6 +6,7 @@
#include <Kernel/Arch/x86/DescriptorTable.h>
#include <Kernel/Arch/x86/TrapFrame.h>
+
// clang-format off
asm(
".globl interrupt_common_asm_entry\n"
diff --git a/Kernel/Arch/x86/i386/Processor.cpp b/Kernel/Arch/x86/i386/Processor.cpp
index dd08a0813a..1432a94b02 100644
--- a/Kernel/Arch/x86/i386/Processor.cpp
+++ b/Kernel/Arch/x86/i386/Processor.cpp
@@ -37,7 +37,6 @@ asm(
);
// clang-format on
-#if ARCH(I386)
// clang-format off
asm(
".global do_assume_context \n"
@@ -59,7 +58,6 @@ asm(
" jmp enter_thread_context \n"
);
// clang-format on
-#endif
String Processor::platform_string() const
{
diff --git a/Kernel/Arch/x86/x86_64/Boot/boot.S b/Kernel/Arch/x86/x86_64/Boot/boot.S
index 8e1d9d27ba..3a6f624dba 100644
--- a/Kernel/Arch/x86/x86_64/Boot/boot.S
+++ b/Kernel/Arch/x86/x86_64/Boot/boot.S
@@ -317,7 +317,8 @@ apic_ap_start32:
movl $0x80000001, %eax
cpuid
testl $0x100000, %edx
- je (1f - apic_ap_start + 0x8000)
+ // TODO: Uncomment this
+ //je (1f - apic_ap_start + 0x8000)
/* turn on IA32_EFER.NXE */
movl $0xc0000080, %ecx
rdmsr
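The disabled je is the guard that skips NX setup when the CPU lacks it: cpuid leaf 0x80000001 reports NX support in EDX bit 20 (the 0x100000 mask above), and the rdmsr/wrmsr on MSR 0xc0000080 (IA32_EFER) that follows turns on the NXE bit. A rough C++ equivalent of the check, for illustration only (the kernel does this in assembly before long mode is entered):

constexpr u32 IA32_EFER = 0xC0000080; // EFER MSR; bit 11 is NXE
static bool cpu_supports_nx()
{
    u32 eax, ebx, ecx, edx;
    asm volatile("cpuid"
                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                 : "a"(0x80000001u));
    return (edx & (1u << 20)) != 0; // the 0x100000 mask tested in apic_ap_start32
}

Re-enabling the branch restores that skip for CPUs without NX, which is the TODO noted in the hunk.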
diff --git a/Kernel/Arch/x86/x86_64/CPU.cpp b/Kernel/Arch/x86/x86_64/CPU.cpp
new file mode 100644
index 0000000000..8d9c0954d3
--- /dev/null
+++ b/Kernel/Arch/x86/x86_64/CPU.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Assertions.h>
+#include <AK/Types.h>
+
+#include <Kernel/Arch/x86/CPU.h>
+#include <Kernel/Arch/x86/Processor.h>
+#include <Kernel/Arch/x86/TrapFrame.h>
+#include <Kernel/KSyms.h>
+#include <Kernel/Process.h>
+#include <Kernel/Thread.h>
+
+namespace Kernel {
+
+// The compiler can't see the calls to these functions inside assembly.
+// Declare them, to avoid dead code warnings.
+extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
+extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
+extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
+
+extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
+{
+ (void)from_thread;
+ (void)to_thread;
+ TODO();
+}
+
+extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
+{
+ TODO();
+}
+
+extern "C" u32 do_init_context(Thread* thread, u32 flags)
+{
+ (void)thread;
+ (void)flags;
+ TODO();
+}
+}
diff --git a/Kernel/Arch/x86/x86_64/InterruptEntry.cpp b/Kernel/Arch/x86/x86_64/InterruptEntry.cpp
new file mode 100644
index 0000000000..991583929f
--- /dev/null
+++ b/Kernel/Arch/x86/x86_64/InterruptEntry.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/x86/DescriptorTable.h>
+#include <Kernel/Arch/x86/TrapFrame.h>
+
+// clang-format off
+asm(
+".globl interrupt_common_asm_entry\n"
+"interrupt_common_asm_entry: \n"
+" int3 \n" // FIXME
+".globl common_trap_exit \n"
+"common_trap_exit: \n"
+// another thread may have handled this trap at this point, so don't
+// make assumptions about the stack other than there's a TrapFrame
+// and a pointer to it.
+" call exit_trap \n"
+" int3 \n" // FIXME
+);
+// clang-format on
diff --git a/Kernel/Arch/x86/x86_64/Processor.cpp b/Kernel/Arch/x86/x86_64/Processor.cpp
index 7db6c9ad4c..01c41a0481 100644
--- a/Kernel/Arch/x86/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86/x86_64/Processor.cpp
@@ -88,6 +88,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
// TODO: handle NT?
VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
+#if 0
auto& tss = thread.tss();
bool return_to_user = (tss.cs & 3) != 0;
@@ -116,7 +117,6 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
// However, the first step is to always start in kernel mode with thread_context_first_enter
RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
// FIXME: copy state to be recovered through TSS
- TODO();
// make space for a trap frame
stack_top -= sizeof(TrapFrame);
@@ -161,6 +161,9 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
tss.gs = GDT_SELECTOR_DATA0;
tss.ss = GDT_SELECTOR_DATA0;
tss.fs = GDT_SELECTOR_PROC;
+#else
+ TODO();
+#endif
return stack_top;
}
@@ -202,11 +205,15 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
auto& tss = initial_thread.tss();
m_tss = tss;
+#if 0
m_tss.esp0 = tss.esp0;
m_tss.ss0 = GDT_SELECTOR_DATA0;
// user mode needs to be able to switch to kernel mode:
m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
m_tss.fs = GDT_SELECTOR_PROC | 3;
+#else
+ TODO();
+#endif
m_scheduler_initialized = true;
diff --git a/Kernel/Arch/x86/x86_64/SafeMem.cpp b/Kernel/Arch/x86/x86_64/SafeMem.cpp
new file mode 100644
index 0000000000..bbc622e771
--- /dev/null
+++ b/Kernel/Arch/x86/x86_64/SafeMem.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/x86/SafeMem.h>
+
+#define CODE_SECTION(section_name) __attribute__((section(section_name)))
+
+namespace Kernel {
+
+CODE_SECTION(".text.safemem")
+NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
+{
+ (void)dest_ptr;
+ (void)src_ptr;
+ (void)n;
+ (void)fault_at;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem")
+NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
+{
+ (void)str;
+ (void)max_n;
+ (void)fault_at;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem")
+NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
+{
+ (void)dest_ptr;
+ (void)c;
+ (void)n;
+ (void)fault_at;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
+{
+ (void)var;
+ (void)val;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
+{
+ (void)var;
+ (void)val;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
+{
+ (void)var;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
+{
+ (void)var;
+ (void)val;
+ TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
+{
+ (void)var;
+ (void)expected;
+ (void)val;
+ TODO();
+}
+
+bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
+{
+ (void)regs;
+ (void)fault_address;
+ TODO();
+}
+
+}
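These stubs exist so that callers of the safe-memory API compile and link on x86_64; the dedicated .text.safemem sections are what lets the page-fault handler recognize a fault that happened inside one of these routines and recover instead of panicking, which is also why handle_safe_access_fault() is stubbed here. The calling convention is visible earlier in this diff, in Processor::capture_stack_trace(); wrapped into a small illustrative helper (the function name is made up for the example):

// Illustrative helper, mirroring the safe_memcpy() call site in Processor.cpp above.
static FlatPtr read_frame_pointer_safely(FlatPtr* stack_top)
{
    void* fault_at = nullptr;
    FlatPtr frame_ptr = 0;
    if (!Kernel::safe_memcpy(&frame_ptr, stack_top, sizeof(FlatPtr), fault_at))
        frame_ptr = 0; // the read faulted at fault_at; treat it as end of stack
    return frame_ptr;
}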