summaryrefslogtreecommitdiff
path: root/Kernel/Arch
diff options
context:
space:
mode:
authorAndreas Kling <kling@serenityos.org>2021-03-04 17:50:05 +0100
committerAndreas Kling <kling@serenityos.org>2021-03-04 18:25:01 +0100
commitadb2e6be5f2858076f7c226c1d986db464fd4f0b (patch)
tree464926708344545ee1942b96d991570c163a7bbc /Kernel/Arch
parentaae91dda66f99cce81f6d21ffad46528d69a2194 (diff)
downloadserenity-adb2e6be5f2858076f7c226c1d986db464fd4f0b.zip
Kernel: Make the kernel compile & link for x86_64
It's now possible to build the whole kernel with an x86_64 toolchain. There's no bootstrap code so it doesn't work yet (obviously.)
Diffstat (limited to 'Kernel/Arch')
-rw-r--r--Kernel/Arch/i386/CPU.cpp32
-rw-r--r--Kernel/Arch/i386/CPU.h2
-rw-r--r--Kernel/Arch/i386/Interrupts.h32
-rw-r--r--Kernel/Arch/x86_64/Boot/boot.S183
4 files changed, 240 insertions, 9 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 6187bffc97..970483382d 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -76,6 +76,7 @@ extern "C" void handle_interrupt(TrapFrame*);
// clang-format off
+#if ARCH(I386)
#define EH_ENTRY(ec, title) \
extern "C" void title##_asm_entry(); \
extern "C" void title##_handler(TrapFrame*); \
@@ -127,6 +128,26 @@ extern "C" void handle_interrupt(TrapFrame*);
" call " #title "_handler\n" \
" jmp common_trap_exit \n");
+#elif ARCH(X86_64)
+#define EH_ENTRY(ec, title) \
+ extern "C" void title##_asm_entry(); \
+ extern "C" void title##_handler(TrapFrame*); \
+ asm( \
+ ".globl " #title "_asm_entry\n" \
+ "" #title "_asm_entry: \n" \
+ " cli;hlt;\n" \
+);
+
+#define EH_ENTRY_NO_CODE(ec, title) \
+ extern "C" void title##_handler(TrapFrame*); \
+ extern "C" void title##_asm_entry(); \
+asm( \
+ ".globl " #title "_asm_entry\n" \
+ "" #title "_asm_entry: \n" \
+ " cli;hlt;\n" \
+);
+#endif
+
// clang-format on
static void dump(const RegisterState& regs)
@@ -1593,6 +1614,7 @@ extern "C" u32 do_init_context(Thread* thread, u32 flags)
extern "C" void do_assume_context(Thread* thread, u32 flags);
+#if ARCH(I386)
// clang-format off
asm(
".global do_assume_context \n"
@@ -1614,8 +1636,9 @@ asm(
" jmp enter_thread_context \n"
);
// clang-format on
+#endif
-void Processor::assume_context(Thread& thread, u32 flags)
+void Processor::assume_context(Thread& thread, FlatPtr flags)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
@@ -1624,7 +1647,12 @@ void Processor::assume_context(Thread& thread, u32 flags)
// in_critical() should be 2 here. The critical section in Process::exec
// and then the scheduler lock
VERIFY(Processor::current().in_critical() == 2);
+#if ARCH(I386)
do_assume_context(&thread, flags);
+#elif ARCH(X86_64)
+ (void)flags;
+ TODO();
+#endif
VERIFY_NOT_REACHED();
}
@@ -2315,12 +2343,14 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
: "memory");
set_fs(GDT_SELECTOR_PROC);
+#if ARCH(I386)
// Make sure CS points to the kernel code descriptor.
// clang-format off
asm volatile(
"ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
"sanity:\n");
// clang-format on
+#endif
}
void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, const RegisterState& kernel_regs)
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index e343369a29..84ed4b05e5 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -1014,7 +1014,7 @@ public:
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
void switch_context(Thread*& from_thread, Thread*& to_thread);
- [[noreturn]] static void assume_context(Thread& thread, u32 flags);
+ [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
u32 init_context(Thread& thread, bool leave_crit);
static Vector<FlatPtr> capture_stack_trace(Thread& thread, size_t max_frames = 0);
diff --git a/Kernel/Arch/i386/Interrupts.h b/Kernel/Arch/i386/Interrupts.h
index 7d4e4b69ba..cfa457da16 100644
--- a/Kernel/Arch/i386/Interrupts.h
+++ b/Kernel/Arch/i386/Interrupts.h
@@ -32,13 +32,15 @@
extern "C" void interrupt_common_asm_entry();
-#define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \
- extern "C" void interrupt_##isr_number##_asm_entry(); \
- asm(".globl interrupt_" #isr_number "_asm_entry\n" \
- "interrupt_" #isr_number "_asm_entry:\n" \
- " pushw $" #isr_number "\n" \
- " pushw $0\n" \
- " jmp interrupt_common_asm_entry\n");
+#if ARCH(I386)
+
+# define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \
+ extern "C" void interrupt_##isr_number##_asm_entry(); \
+ asm(".globl interrupt_" #isr_number "_asm_entry\n" \
+ "interrupt_" #isr_number "_asm_entry:\n" \
+ " pushw $" #isr_number "\n" \
+ " pushw $0\n" \
+ " jmp interrupt_common_asm_entry\n");
// clang-format off
asm(
@@ -83,3 +85,19 @@ asm(
" iret\n"
);
// clang-format on
+
+#elif ARCH(X86_64)
+
+# define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \
+ extern "C" void interrupt_##isr_number##_asm_entry(); \
+ asm(".globl interrupt_" #isr_number "_asm_entry\n" \
+ "interrupt_" #isr_number "_asm_entry:\n" \
+ " cli\n" \
+ " hlt\n");
+
+asm(
+ ".globl common_trap_exit\n"
+ "common_trap_exit:\n"
+ " cli;hlt\n");
+
+#endif
diff --git a/Kernel/Arch/x86_64/Boot/boot.S b/Kernel/Arch/x86_64/Boot/boot.S
new file mode 100644
index 0000000000..1ed1cd9207
--- /dev/null
+++ b/Kernel/Arch/x86_64/Boot/boot.S
@@ -0,0 +1,183 @@
+.set MULTIBOOT_MAGIC, 0x1badb002
+.set MULTIBOOT_PAGE_ALIGN, 0x1
+.set MULTIBOOT_MEMORY_INFO, 0x2
+.set MULTIBOOT_VIDEO_MODE, 0x4
+.set multiboot_flags, MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO | MULTIBOOT_VIDEO_MODE
+.set multiboot_checksum, -(MULTIBOOT_MAGIC + multiboot_flags)
+
+.section .multiboot
+.align 4
+
+.long MULTIBOOT_MAGIC
+.long multiboot_flags
+.long multiboot_checksum
+
+
+/* for MULTIBOOT_MEMORY_INFO */
+.long 0x00000000 /* header_addr */
+.long 0x00000000 /* load_addr */
+.long 0x00000000 /* load_end_addr */
+.long 0x00000000 /* bss_end_addr */
+.long 0x00000000 /* entry_addr */
+
+/* for MULTIBOOT_VIDEO_MODE */
+.long 0x00000000 /* mode_type */
+.long 1280 /* width */
+.long 1024 /* height */
+.long 32 /* depth */
+
+.section .stack, "aw", @nobits
+stack_bottom:
+.skip 32768
+stack_top:
+
+.global kernel_cmdline
+kernel_cmdline:
+.skip 4096
+
+.section .page_tables, "aw", @nobits
+.align 4096
+.global boot_pdpt
+boot_pdpt:
+.skip 4096
+.global boot_pd0
+boot_pd0:
+.skip 4096
+.global boot_pd3
+boot_pd3:
+.skip 4096
+.global boot_pd0_pt0
+boot_pd0_pt0:
+.skip 4096 * 4
+.global boot_pd3_pts
+boot_pd3_pts:
+.skip 4096 * 8
+.global boot_pd3_pt1023
+boot_pd3_pt1023:
+.skip 4096
+
+.section .text
+
+.global start
+.type start, @function
+
+.extern init
+.type init, @function
+
+.extern multiboot_info_ptr
+.type multiboot_info_ptr, @object
+
+start:
+ cli
+ cld
+
+ /* We don't know where the bootloader might have put the command line.
+ * It might be at an inconvenient location that we're not about to map,
+ * so let's just copy it to a convenient location while we have the whole
+ * memory space identity-mapped anyway. :^)
+ */
+
+ movl %ebx, %esi
+ addl $16, %esi
+ movl (%esi), %esi
+ movl $1024, %ecx
+ movl $(kernel_cmdline - 0xc0000000), %edi
+ rep movsl
+
+ call init
+ add $4, %esp
+
+ cli
+loop:
+ hlt
+ jmp loop
+
+.extern init_ap
+.type init_ap, @function
+
+/*
+ The apic_ap_start function will be loaded to P0x00008000 where the APIC
+ will boot the AP from in real mode. This code also contains space for
+ special variables that *must* remain here. When initializing the APIC,
+ the code here gets copied to P0x00008000, the variables in here get
+ populated and then the boot of the APs will be triggered. Having
+ the variables here allows us to access them from real mode. Also, the
+ code here avoids the need for relocation entries.
+
+ Basically, the variables between apic_ap_start and end_apic_ap_start
+ *MUST* remain here and cannot be moved into a .bss or any other location.
+*/
+.global apic_ap_start
+.type apic_ap_start, @function
+apic_ap_start:
+.code16
+ cli
+ jmp $0x800, $(1f - apic_ap_start) /* avoid relocation entries */
+1:
+ mov %cs, %ax
+ mov %ax, %ds
+
+ xor %ax, %ax
+ mov %ax, %sp
+
+ /* load the first temporary gdt */
+ lgdt (ap_cpu_gdtr_initial - apic_ap_start)
+
+ /* enable PM */
+ movl %cr0, %eax
+ orl $1, %eax
+ movl %eax, %cr0
+
+ ljmpl $8, $(apic_ap_start32 - apic_ap_start + 0x8000)
+apic_ap_start32:
+.code32
+ cli
+ hlt
+
+.align 4
+.global apic_ap_start_size
+apic_ap_start_size:
+ .2byte end_apic_ap_start - apic_ap_start
+.align 4
+ap_cpu_id:
+ .4byte 0x0
+ap_cpu_gdt:
+ /* null */
+ .8byte 0x0
+ /* code */
+ .4byte 0x0000FFFF
+ .4byte 0x00cf9a00
+ /* data */
+ .4byte 0x0000FFFF
+ .4byte 0x00cf9200
+ap_cpu_gdt_end:
+ap_cpu_gdtr_initial:
+ .2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
+ .4byte (ap_cpu_gdt - apic_ap_start) + 0x8000
+ap_cpu_gdtr_initial2:
+ .2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
+ .4byte (ap_cpu_gdt - apic_ap_start) + 0xc0008000
+.global ap_cpu_gdtr
+ap_cpu_gdtr:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_idtr
+ap_cpu_idtr:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_cr0
+ap_cpu_init_cr0:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_cr3
+ap_cpu_init_cr3:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_cr4
+ap_cpu_init_cr4:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_processor_info_array
+ap_cpu_init_processor_info_array:
+ .4byte 0x0 /* will be set at runtime */
+.global ap_cpu_init_stacks
+ap_cpu_init_stacks:
+ /* array of allocated stack pointers */
+ /* NOTE: ap_cpu_init_stacks must be the last variable before
+ end_apic_ap_start! */
+.set end_apic_ap_start, .