author     Idan Horowitz <idan.horowitz@gmail.com>   2023-04-02 03:26:57 +0300
committer  Andrew Kaster <andrewdkaster@gmail.com>   2023-04-03 02:59:37 -0600
commit     a349570a04a07b96059da49d325515f63151c4b5 (patch)
tree       9b7728430033ac45a4a80fa6e493e2cf29c2363f
parent     6ad8f4bb1135275d7cfe8cbbd46819f9160bde8e (diff)
Kernel: Abstract Processor::assume_context flags using InterruptsState
The details of the specific interrupt bits that must be turned on are irrelevant to the sys$execve implementation. Abstract them away into the Processor implementations using the InterruptsState enum.
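For context: InterruptsState is a two-value abstraction over the architecture-specific interrupt-enable state, so a caller such as sys$execve can say "interrupts were on/off" without knowing which RFLAGS or DAIF bits encode that. A minimal sketch of the shape of the enum (illustrative only; the real definition lives in the kernel's Processor headers and may differ in detail):

    // Illustrative sketch, not copied from the tree.
    enum class InterruptsState {
        Enabled,  // interrupts were (or should be) unmasked
        Disabled, // interrupts were (or should be) masked
    };

    // After this patch a caller passes the enum straight through:
    //     Processor::assume_context(*current_thread, previous_interrupts_state);
    // and the architecture-specific Processor code turns it back into flag bits.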
-rw-r--r--  Kernel/Arch/aarch64/Processor.cpp   4
-rw-r--r--  Kernel/Arch/aarch64/Processor.h     3
-rw-r--r--  Kernel/Arch/x86_64/Processor.cpp    3
-rw-r--r--  Kernel/Arch/x86_64/Processor.h      3
-rw-r--r--  Kernel/Syscalls/execve.cpp          9
5 files changed, 9 insertions, 13 deletions
diff --git a/Kernel/Arch/aarch64/Processor.cpp b/Kernel/Arch/aarch64/Processor.cpp
index 70894fe1dc..3946f8ea33 100644
--- a/Kernel/Arch/aarch64/Processor.cpp
+++ b/Kernel/Arch/aarch64/Processor.cpp
@@ -255,10 +255,10 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
}
-void Processor::assume_context(Thread& thread, FlatPtr flags)
+void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_state)
{
(void)thread;
- (void)flags;
+ (void)new_interrupts_state;
TODO_AARCH64();
}
diff --git a/Kernel/Arch/aarch64/Processor.h b/Kernel/Arch/aarch64/Processor.h
index 2baaec99c5..9db1950180 100644
--- a/Kernel/Arch/aarch64/Processor.h
+++ b/Kernel/Arch/aarch64/Processor.h
@@ -25,6 +25,7 @@ class PageDirectory;
class Thread;
class Processor;
struct TrapFrame;
+enum class InterruptsState;
// FIXME This needs to go behind some sort of platform abstraction
// it is used between Thread and Processor.
@@ -270,7 +271,7 @@ public:
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
- [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
+ [[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
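A side note on the `enum class InterruptsState;` line added above: a scoped enum without an enumerator list is an opaque declaration with a fixed underlying type (int by default), so the header can use the type in declarations without including its definition. A standalone illustration, not kernel code:

    // opaque_enum.cpp -- standalone illustration only
    enum class InterruptsState;                       // opaque declaration; the size is already known
    void assume_context_stub(InterruptsState);        // a declaration only needs the name

    enum class InterruptsState { Enabled, Disabled }; // the full definition can come later

    void assume_context_stub(InterruptsState state)
    {
        (void)state; // placeholder body, mirroring the aarch64 stub earlier in the diff
    }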
diff --git a/Kernel/Arch/x86_64/Processor.cpp b/Kernel/Arch/x86_64/Processor.cpp
index f2ee57fa09..762bb7a6b8 100644
--- a/Kernel/Arch/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86_64/Processor.cpp
@@ -1569,7 +1569,7 @@ extern "C" FlatPtr do_init_context(Thread* thread, u32 flags)
return Processor::current().init_context(*thread, true);
}
-void Processor::assume_context(Thread& thread, FlatPtr flags)
+void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_state)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
@@ -1579,6 +1579,7 @@ void Processor::assume_context(Thread& thread, FlatPtr flags)
// and then the scheduler lock
VERIFY(Processor::in_critical() == 2);
+ u32 flags = 2 | (new_interrupts_state == InterruptsState::Enabled ? 0x200 : 0);
do_assume_context(&thread, flags);
VERIFY_NOT_REACHED();
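The literals in the new flags computation are x86_64 RFLAGS bits: bit 1 (value 2) is the reserved bit that always reads as 1, and bit 9 (0x200) is IF, the interrupt-enable flag. The same expression with named constants, purely for illustration (the patch keeps the raw literals inline):

    // Illustration only; not part of this diff.
    static constexpr u32 RFLAGS_RESERVED_BIT1 = 1u << 1; // always set in RFLAGS
    static constexpr u32 RFLAGS_IF            = 1u << 9; // 0x200, interrupt enable

    u32 flags = RFLAGS_RESERVED_BIT1
        | (new_interrupts_state == InterruptsState::Enabled ? RFLAGS_IF : 0);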
diff --git a/Kernel/Arch/x86_64/Processor.h b/Kernel/Arch/x86_64/Processor.h
index 5df979d0a7..66832eb926 100644
--- a/Kernel/Arch/x86_64/Processor.h
+++ b/Kernel/Arch/x86_64/Processor.h
@@ -55,6 +55,7 @@ struct [[gnu::aligned(64), gnu::packed]] FPUState
u8 ext_save_area[256];
};
+enum class InterruptsState;
class Processor;
// Note: We only support 64 processors at most at the moment,
// so allocate 64 slots of inline capacity in the container.
@@ -414,7 +415,7 @@ public:
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
- [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
+ [[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 83e2b37a66..1c08497fca 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -980,15 +980,8 @@ ErrorOr<FlatPtr> Process::sys$execve(Userspace<Syscall::SC_execve_params const*>
VERIFY(Processor::in_critical() == 1);
g_scheduler_lock.lock();
current_thread->set_state(Thread::State::Running);
-#if ARCH(X86_64)
- FlatPtr prev_flags = previous_interrupts_state == InterruptsState::Enabled ? 0x200 : 0;
- Processor::assume_context(*current_thread, prev_flags);
+ Processor::assume_context(*current_thread, previous_interrupts_state);
VERIFY_NOT_REACHED();
-#elif ARCH(AARCH64)
- TODO_AARCH64();
-#else
-# error Unknown architecture
-#endif
}
// NOTE: This code path is taken in the non-syscall case, i.e when the kernel spawns
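Taken together, the enum now flows symmetrically: the caller captures the interrupt state, and the Processor code on each architecture rebuilds the hardware bits. The sketch below shows the assumed shape of that other half on x86_64 (names and bodies are a hedged reconstruction, not part of this diff):

    // Hedged sketch of the companion helpers this enum pairs with; not part of this diff.
    ALWAYS_INLINE static InterruptsState interrupts_state()
    {
        return are_interrupts_enabled() ? InterruptsState::Enabled
                                        : InterruptsState::Disabled;
    }

    ALWAYS_INLINE static void restore_interrupts_state(InterruptsState interrupts_state)
    {
        if (interrupts_state == InterruptsState::Enabled)
            sti(); // set IF on x86_64
        else
            cli(); // clear IF on x86_64
    }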