-rw-r--r--   Kernel/Arch/i386/CPU.cpp    |  61
-rw-r--r--   Kernel/Arch/i386/CPU.h      |  70
-rw-r--r--   Kernel/Lock.cpp             |  15
-rw-r--r--   Kernel/Process.cpp          |  46
-rw-r--r--   Kernel/Process.h            |   3
-rw-r--r--   Kernel/Scheduler.cpp        |  31
-rw-r--r--   Kernel/SpinLock.h           |  41
-rw-r--r--   Kernel/Thread.cpp           |  50
-rw-r--r--   Kernel/Thread.h             |   4
-rw-r--r--   Kernel/VM/MemoryManager.cpp |   5
-rw-r--r--   Kernel/VM/MemoryManager.h   |   1
-rw-r--r--   Kernel/WaitQueue.cpp        |  10
12 files changed, 242 insertions, 95 deletions
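The common thread in these changes is a per-processor critical section counter (m_in_critical) that replaces many blanket cli()/sti() sites. A minimal usage sketch, assuming only the Processor and ScopedCritical APIs added in Kernel/Arch/i386/CPU.h below (the caller function is illustrative, not part of the commit):

    void example_update_per_cpu_state() // hypothetical caller, for illustration only
    {
        u32 prev_flags;
        Processor::current().enter_critical(prev_flags); // ++m_in_critical, remember EFLAGS, cli()
        // ... touch state that must not be preempted on this CPU ...
        Processor::current().leave_critical(prev_flags); // --m_in_critical, may run check_invoke_scheduler(), re-enables interrupts if they were on

        // Equivalent RAII form:
        {
            ScopedCritical critical;
            // ... critical section ends when `critical` is destroyed ...
        }
    }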
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 8a65051fbe..b7b8d5bbd6 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -828,6 +828,10 @@ void Processor::early_initialize(u32 cpu)
     m_cpu = cpu;
     m_in_irq = 0;
+    m_in_critical = 0;
+
+    m_invoke_scheduler_async = false;
+    m_scheduler_initialized = false;
 
     m_idle_thread = nullptr;
     m_current_thread = nullptr;
@@ -961,9 +965,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
 void Processor::switch_context(Thread* from_thread, Thread* to_thread)
 {
     ASSERT(!in_irq());
+    ASSERT(!m_in_critical);
     ASSERT(is_kernel_mode());
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "switch_context --> switching out of: " << *from_thread;
+    dbg() << "switch_context --> switching out of: " << VirtualAddress(from_thread) << " " << *from_thread;
 #endif
 
     // Switch to new thread context, passing from_thread and to_thread
@@ -1006,7 +1011,7 @@
         [to_thread] "a" (to_thread)
     );
 
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "switch_context <-- from " << *from_thread << " to " << *to_thread;
+    dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread;
 #endif
 }
 
@@ -1017,9 +1022,14 @@ extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapF
     (void)from_thread;
     (void)to_thread;
     (void)trap;
+
+    ASSERT(to_thread == Thread::current());
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "switch_context <-- from " << *from_thread << " to " << *to_thread << " (context_first_init)";
+    dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread << " (context_first_init)";
 #endif
+    if (to_thread->process().wait_for_tracer_at_next_execve()) {
+        to_thread->send_urgent_signal_to_self(SIGSTOP);
+    }
 }
 
 extern "C" void thread_context_first_enter(void);
@@ -1038,9 +1048,15 @@ asm(
 "    jmp common_trap_exit \n"
 );
 
-u32 Processor::init_context(Thread& thread)
+u32 Processor::init_context(Thread& thread, bool leave_crit)
 {
     ASSERT(is_kernel_mode());
+    if (leave_crit) {
+        ASSERT(in_critical());
+        m_in_critical--; // leave it without triggering anything
+        ASSERT(!in_critical());
+    }
+
     const u32 kernel_stack_top = thread.kernel_stack_top();
     u32 stack_top = kernel_stack_top;
@@ -1098,7 +1114,10 @@ u32 Processor::init_context(Thread& thread)
     *reinterpret_cast<u32*>(stack_top) = stack_top + 4;
 
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "init_context " << thread << " set up to execute at eip: " << VirtualAddress(tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
+    if (return_to_user)
+        dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top) << " user esp: " << String::format("%02x:%08x", iretframe.userspace_ss, (u32)iretframe.userspace_esp);
+    else
+        dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
 #endif
 
     // make switch_context() always first return to thread_context_first_enter()
@@ -1118,24 +1137,29 @@ u32 Processor::init_context(Thread& thread)
 }
 
-extern "C" u32 do_init_context(Thread* thread)
+extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
-    return Processor::init_context(*thread);
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(Processor::current().in_critical());
+    thread->tss().eflags = flags;
+    return Processor::current().init_context(*thread, true);
 }
 
-extern "C" void do_assume_context(Thread* thread);
+extern "C" void do_assume_context(Thread* thread, u32 flags);
 
 asm(
 ".global do_assume_context \n"
 "do_assume_context: \n"
 "    movl 4(%esp), %ebx \n"
+"    movl 8(%esp), %esi \n"
 // We're going to call Processor::init_context, so just make sure
 // we have enough stack space so we don't stomp over it
 "    subl $(" __STRINGIFY(4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4) "), %esp \n"
+"    pushl %esi \n"
 "    pushl %ebx \n"
 "    cld \n"
 "    call do_init_context \n"
-"    addl $4, %esp \n"
+"    addl $8, %esp \n"
 "    movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
 "    pushl %ebx \n" // push to_thread
 "    pushl %ebx \n" // push from_thread
@@ -1143,9 +1167,13 @@ asm(
 "    jmp enter_thread_context \n"
 );
 
-void Processor::assume_context(Thread& thread)
+void Processor::assume_context(Thread& thread, u32 flags)
 {
-    do_assume_context(&thread);
+#ifdef CONTEXT_SWITCH_DEBUG
+    dbg() << "Assume context for thread " << VirtualAddress(&thread) << " " << thread;
+#endif
+    ASSERT_INTERRUPTS_DISABLED();
+    do_assume_context(&thread, flags);
     ASSERT_NOT_REACHED();
 }
 
@@ -1161,6 +1189,7 @@ void Processor::initialize_context_switching(Thread& initial_thread)
     m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
     m_tss.fs = GDT_SELECTOR_PROC | 3;
 
+    m_scheduler_initialized = true;
 
     asm volatile(
         "movl %[new_esp], %%esp \n" // swich to new stack
@@ -1197,7 +1226,15 @@ void Processor::exit_trap(TrapFrame& trap)
     ASSERT(m_in_irq >= trap.prev_irq_level);
     m_in_irq = trap.prev_irq_level;
 
-    if (m_invoke_scheduler_async && !m_in_irq) {
+    if (!m_in_irq && !m_in_critical)
+        check_invoke_scheduler();
+}
+
+void Processor::check_invoke_scheduler()
+{
+    ASSERT(!m_in_irq);
+    ASSERT(!m_in_critical);
+    if (m_invoke_scheduler_async && m_scheduler_initialized) {
         m_invoke_scheduler_async = false;
         Scheduler::invoke_async();
     }
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 6f6f7def92..bc41078a70 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -622,6 +622,7 @@ class Processor {
 
     u32 m_cpu;
     u32 m_in_irq;
+    u32 m_in_critical;
 
     TSS32 m_tss;
     static FPUState s_clean_fpu_state;
@@ -632,6 +633,7 @@ class Processor {
     Thread* m_idle_thread;
 
     bool m_invoke_scheduler_async;
+    bool m_scheduler_initialized;
 
     void gdt_init();
     void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
@@ -713,11 +715,32 @@ public:
         return m_in_irq;
     }
 
+    ALWAYS_INLINE void enter_critical(u32& prev_flags)
+    {
+        m_in_critical++;
+        prev_flags = cpu_flags();
+        cli();
+    }
+
+    ALWAYS_INLINE void leave_critical(u32 prev_flags)
+    {
+        ASSERT(m_in_critical > 0);
+        if (--m_in_critical == 0) {
+            if (!m_in_irq)
+                check_invoke_scheduler();
+        }
+        if (prev_flags & 0x200)
+            sti();
+    }
+
+    ALWAYS_INLINE u32& in_critical() { return m_in_critical; }
+
     ALWAYS_INLINE const FPUState& clean_fpu_state() const
     {
         return s_clean_fpu_state;
     }
 
+    void check_invoke_scheduler();
     void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
 
     void enter_trap(TrapFrame& trap, bool raise_irq);
@@ -725,12 +748,55 @@ public:
 
     [[noreturn]] void initialize_context_switching(Thread& initial_thread);
     void switch_context(Thread* from_thread, Thread* to_thread);
-    [[noreturn]] static void assume_context(Thread& thread);
-    static u32 init_context(Thread& thread);
+    [[noreturn]] static void assume_context(Thread& thread, u32 flags);
+    u32 init_context(Thread& thread, bool leave_crit);
 
     void set_thread_specific(u8* data, size_t len);
 };
 
+class ScopedCritical
+{
+    u32 m_prev_flags;
+    bool m_valid;
+
+public:
+    ScopedCritical(const ScopedCritical&) = delete;
+    ScopedCritical& operator=(const ScopedCritical&) = delete;
+
+    ScopedCritical()
+    {
+        m_valid = true;
+        Processor::current().enter_critical(m_prev_flags);
+    }
+
+    ~ScopedCritical()
+    {
+        if (m_valid) {
+            m_valid = false;
+            Processor::current().leave_critical(m_prev_flags);
+        }
+    }
+
+    ScopedCritical(ScopedCritical&& from):
+        m_prev_flags(from.m_prev_flags),
+        m_valid(from.m_valid)
+    {
+        from.m_prev_flags = 0;
+        from.m_valid = false;
+    }
+
+    ScopedCritical& operator=(ScopedCritical&& from)
+    {
+        if (&from != this) {
+            m_prev_flags = from.m_prev_flags;
+            m_valid = from.m_valid;
+            from.m_prev_flags = 0;
+            from.m_valid = false;
+        }
+        return *this;
+    }
+};
+
 struct TrapFrame {
     u32 prev_irq_level;
     RegisterState* regs; // must be last
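One consequence of the counter is visible in the Scheduler changes further down: a yield requested while in_critical() is non-zero is not performed immediately but deferred. A sketch of the intended flow, assuming the Scheduler::yield() behavior from this commit:

    {
        ScopedCritical critical;   // m_in_critical becomes 1, interrupts masked
        Scheduler::yield();        // sees in_critical() != 0, calls invoke_scheduler_async(), returns false
    }
    // ~ScopedCritical -> leave_critical() -> check_invoke_scheduler()
    // -> Scheduler::invoke_async() performs the deferred switch (once the scheduler is initialized)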
diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp
index 4f6ec943af..05d516d672 100644
--- a/Kernel/Lock.cpp
+++ b/Kernel/Lock.cpp
@@ -24,6 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <AK/TemporaryChange.h>
 #include <Kernel/KSyms.h>
 #include <Kernel/Lock.h>
 #include <Kernel/Thread.h>
@@ -67,6 +68,13 @@ void Lock::lock(Mode mode)
             }
             timeval* timeout = nullptr;
             current_thread->wait_on(m_queue, timeout, &m_lock, m_holder, m_name);
+        } else if (Processor::current().in_critical()) {
+            // If we're in a critical section and trying to lock, no context
+            // switch will happen, so yield.
+            // The assumption is that if we call this from a critical section
+            // that we DO want to temporarily leave it
+            TemporaryChange change(Processor::current().in_critical(), 0u);
+            Scheduler::yield();
         }
     }
 }
@@ -95,6 +103,9 @@ void Lock::unlock()
             return;
         }
         // I don't know *who* is using "m_lock", so just yield.
+        // The assumption is that if we call this from a critical section
+        // that we DO want to temporarily leave it
+        TemporaryChange change(Processor::current().in_critical(), 0u);
         Scheduler::yield();
     }
 }
@@ -102,7 +113,7 @@ void Lock::unlock()
 bool Lock::force_unlock_if_locked()
 {
     ASSERT(m_mode != Mode::Shared);
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (m_holder != Thread::current())
         return false;
     ASSERT(m_times_locked == 1);
@@ -116,7 +127,7 @@ bool Lock::force_unlock_if_locked()
 void Lock::clear_waiters()
 {
     ASSERT(m_mode != Mode::Shared);
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     m_queue.clear();
 }
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index d4b804cd93..df83bd296a 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -84,10 +84,10 @@
 #include <LibELF/Validation.h>
 #include <LibKeyboard/CharacterMapData.h>
 
-#define PROCESS_DEBUG
+//#define PROCESS_DEBUG
 //#define DEBUG_POLL_SELECT
 //#define DEBUG_IO
-#define TASK_DEBUG
+//#define TASK_DEBUG
 //#define FORK_DEBUG
 //#define EXEC_DEBUG
 //#define SIGNAL_DEBUG
@@ -821,9 +821,10 @@ void Process::kill_all_threads()
     });
 }
 
-int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description)
+int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags)
 {
     ASSERT(is_ring3());
+    ASSERT(!Processor::current().in_critical());
     auto path = main_program_description->absolute_path();
 #ifdef EXEC_DEBUG
     dbg() << "do_exec(" << path << ")";
 #endif
@@ -867,7 +868,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     {
         // Need to make sure we don't swap contexts in the middle
-        InterruptDisabler disabler;
+        ScopedCritical critical;
         old_page_directory = move(m_page_directory);
         old_regions = move(m_regions);
         m_page_directory = PageDirectory::create_for_userspace(*this);
@@ -910,7 +911,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     ArmedScopeGuard rollback_regions_guard([&]() {
         ASSERT(Process::current() == this);
         // Need to make sure we don't swap contexts in the middle
-        InterruptDisabler disabler;
+        ScopedCritical critical;
         m_page_directory = move(old_page_directory);
         m_regions = move(old_regions);
         MM.enter_process_paging_scope(*this);
@@ -963,6 +964,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
         master_tls_alignment = alignment;
         return master_tls_region->vaddr().as_ptr();
     };
+    ASSERT(!Processor::current().in_critical());
     bool success = loader->load();
     if (!success) {
         klog() << "do_exec: Failure loading " << path.characters();
@@ -1026,7 +1028,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
         }
     }
 
-    Thread* new_main_thread = nullptr;
+    new_main_thread = nullptr;
     if (&current_thread->process() == this) {
         new_main_thread = current_thread;
     } else {
@@ -1041,11 +1043,10 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     // and we don't want to deal with faults after this point.
     u32 new_userspace_esp = new_main_thread->make_userspace_stack_for_main_thread(move(arguments), move(environment));
 
-    // We cli() manually here because we don't want to get interrupted between do_exec() and Processor::assume_context().
-    // The reason is that the task redirection we've set up above will be clobbered by the timer IRQ.
+    // We cli() manually here because we don't want to get interrupted between do_exec()
+    // and Processor::assume_context() or the next context switch.
     // If we used an InterruptDisabler that sti()'d on exit, we might timer tick'd too soon in exec().
-    if (&current_thread->process() == this)
-        cli();
+    Processor::current().enter_critical(prev_flags);
 
     // NOTE: Be careful to not trigger any page faults below!
 
@@ -1059,7 +1060,6 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     new_main_thread->make_thread_specific_region({});
     new_main_thread->reset_fpu_state();
 
-    // NOTE: if a context switch were to happen, tss.eip and tss.esp would get overwritten!!!
     auto& tss = new_main_thread->m_tss;
     tss.cs = GDT_SELECTOR_CODE3 | 3;
     tss.ds = GDT_SELECTOR_DATA3 | 3;
@@ -1073,7 +1073,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     tss.ss2 = m_pid;
 
 #ifdef TASK_DEBUG
-    klog() << "Process exec'd " << path.characters() << " @ " << String::format("%p", entry_eip);
+    klog() << "Process " << VirtualAddress(this) << " thread " << VirtualAddress(new_main_thread) << " exec'd " << path.characters() << " @ " << String::format("%p", entry_eip);
 #endif
 
     if (was_profiling)
@@ -1081,6 +1081,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     new_main_thread->set_state(Thread::State::Skip1SchedulerPass);
 
     big_lock().force_unlock_if_locked();
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(Processor::current().in_critical());
     return 0;
 }
 
@@ -1249,26 +1251,26 @@ int Process::exec(String path, Vector<String> arguments, Vector<String> environm
     // The bulk of exec() is done by do_exec(), which ensures that all locals
     // are cleaned up by the time we yield-teleport below.
-    int rc = do_exec(move(description), move(arguments), move(environment), move(interpreter_description));
+    Thread* new_main_thread = nullptr;
+    u32 prev_flags = 0;
+    int rc = do_exec(move(description), move(arguments), move(environment), move(interpreter_description), new_main_thread, prev_flags);
 
     m_exec_tid = 0;
     if (rc < 0)
         return rc;
 
-    auto current_thread = Thread::current();
-    if (m_wait_for_tracer_at_next_execve) {
-        ASSERT(current_thread->state() == Thread::State::Skip1SchedulerPass);
-        // State::Skip1SchedulerPass is irrelevant since we block the thread
-        current_thread->set_state(Thread::State::Running);
-        current_thread->send_urgent_signal_to_self(SIGSTOP);
-    }
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(Processor::current().in_critical());
 
-    if (&current_thread->process() == this) {
+    auto current_thread = Thread::current();
+    if (current_thread == new_main_thread) {
         current_thread->set_state(Thread::State::Running);
-        Processor::assume_context(*current_thread);
+        Processor::assume_context(*current_thread, prev_flags);
         ASSERT_NOT_REACHED();
     }
+
+    Processor::current().leave_critical(prev_flags);
     return 0;
 }
diff --git a/Kernel/Process.h b/Kernel/Process.h
index c595b9dd78..6592a9f787 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -450,6 +450,7 @@ public:
     void increment_inspector_count(Badge<ProcessInspectionHandle>) { ++m_inspector_count; }
     void decrement_inspector_count(Badge<ProcessInspectionHandle>) { --m_inspector_count; }
 
+    bool wait_for_tracer_at_next_execve() const { return m_wait_for_tracer_at_next_execve; }
     void set_wait_for_tracer_at_next_execve(bool val) { m_wait_for_tracer_at_next_execve = val; }
 
     KResultOr<u32> peek_user_data(u32* address);
@@ -470,7 +471,7 @@ private:
     void kill_threads_except_self();
     void kill_all_threads();
 
-    int do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description);
+    int do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags);
     ssize_t do_write(FileDescription&, const u8*, int data_size);
 
     KResultOr<NonnullRefPtr<FileDescription>> find_elf_interpreter_for_executable(const String& path, char (&first_page)[PAGE_SIZE], int nread, size_t file_size);
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 1ef990c2bb..9d36b9eda7 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -327,7 +327,7 @@ void Scheduler::start()
     idle_thread.set_ticks_left(time_slice_for(idle_thread));
     idle_thread.did_schedule();
     idle_thread.set_initialized(true);
-    Processor::init_context(idle_thread);
+    processor.init_context(idle_thread, false);
     idle_thread.set_state(Thread::Running);
     ASSERT(idle_thread.affinity() == (1u << processor.id()));
     processor.initialize_context_switching(idle_thread);
@@ -453,21 +453,23 @@ bool Scheduler::pick_next()
 
 bool Scheduler::yield()
 {
+    InterruptDisabler disabler;
     auto& proc = Processor::current();
     auto current_thread = Thread::current();
 #ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << Processor::current().id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
+    dbg() << "Scheduler[" << proc.id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
 #endif
-    InterruptDisabler disabler;
     ASSERT(current_thread != nullptr);
-    if (proc.in_irq()) {
-        // If we're handling an IRQ we can't switch context, delay until
-        // exiting the trap
+    if (proc.in_irq() || proc.in_critical()) {
+        // If we're handling an IRQ we can't switch context, or we're in
+        // a critical section where we don't want to switch contexts, then
+        // delay until exiting the trap or critical section
         proc.invoke_scheduler_async();
+        return false;
     } else if (!Scheduler::pick_next())
         return false;
 #ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << Processor::current().id() << "]: yield returns to thread " << *current_thread << " in_irq: " << proc.in_irq();
+    dbg() << "Scheduler[" << proc.id() << "]: yield returns to thread " << *current_thread << " in_irq: " << proc.in_irq();
 #endif
     return true;
 }
 
@@ -475,10 +477,16 @@ bool Scheduler::yield()
 bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
 {
     InterruptDisabler disabler;
-    ASSERT(!Processor::current().in_irq());
+    auto& proc = Processor::current();
+    ASSERT(!proc.in_irq());
     if (!Thread::is_thread(beneficiary))
         return false;
 
+    if (proc.in_critical()) {
+        proc.invoke_scheduler_async();
+        return false;
+    }
+
     (void)reason;
     unsigned ticks_left = Thread::current()->ticks_left();
     if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
@@ -486,7 +494,7 @@ bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
     unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
 #ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << Processor::current().id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
+    dbg() << "Scheduler[" << proc.id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
 #endif
     beneficiary->set_ticks_left(ticks_to_donate);
     Scheduler::context_switch(*beneficiary);
@@ -513,13 +521,14 @@ bool Scheduler::context_switch(Thread& thread)
 #endif
     }
 
+    auto& proc = Processor::current();
     if (!thread.is_initialized()) {
-        Processor::init_context(thread);
+        proc.init_context(thread, false);
         thread.set_initialized(true);
     }
     thread.set_state(Thread::Running);
 
-    Processor::current().switch_context(current_thread, &thread);
+    proc.switch_context(current_thread, &thread);
     return true;
 }
 
diff --git a/Kernel/SpinLock.h b/Kernel/SpinLock.h
index e0855a0790..a62b2519bc 100644
--- a/Kernel/SpinLock.h
+++ b/Kernel/SpinLock.h
@@ -43,19 +43,23 @@ public:
     SpinLock(const SpinLock&) = delete;
     SpinLock(SpinLock&&) = delete;
 
-    ALWAYS_INLINE void lock()
+    ALWAYS_INLINE u32 lock()
     {
+        u32 prev_flags;
+        Processor::current().enter_critical(prev_flags);
         BaseType expected;
         do {
             expected = 0;
         } while (!m_lock.compare_exchange_strong(expected, 1, AK::memory_order_acq_rel));
+        return prev_flags;
     }
 
-    ALWAYS_INLINE void unlock()
+    ALWAYS_INLINE void unlock(u32 prev_flags)
     {
         ASSERT(is_locked());
         m_lock.store(0, AK::memory_order_release);
+        Processor::current().leave_critical(prev_flags);
     }
 
     ALWAYS_INLINE bool is_locked() const
@@ -79,9 +83,12 @@ public:
     RecursiveSpinLock(const RecursiveSpinLock&) = delete;
     RecursiveSpinLock(RecursiveSpinLock&&) = delete;
 
-    ALWAYS_INLINE void lock()
+    ALWAYS_INLINE u32 lock()
     {
-        FlatPtr cpu = FlatPtr(&Processor::current());
+        auto& proc = Processor::current();
+        FlatPtr cpu = FlatPtr(&proc);
+        u32 prev_flags;
+        proc.enter_critical(prev_flags);
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
                 break;
             expected = 0;
         }
         m_recursions++;
+        return prev_flags;
     }
 
-    ALWAYS_INLINE void unlock()
+    ALWAYS_INLINE void unlock(u32 prev_flags)
     {
         ASSERT(m_recursions > 0);
         ASSERT(m_lock.load(AK::memory_order_consume) == FlatPtr(&Processor::current()));
         if (--m_recursions == 0)
             m_lock.store(0, AK::memory_order_release);
+        Processor::current().leave_critical(prev_flags);
     }
 
     ALWAYS_INLINE bool is_locked() const
@@ -114,8 +123,8 @@ template <typename BaseType = u32, typename LockType = SpinLock<BaseType>>
 class ScopedSpinLock {
     LockType* m_lock;
+    u32 m_prev_flags{0};
     bool m_have_lock{false};
-    bool m_flag{false};
 
 public:
     ScopedSpinLock() = delete;
 
@@ -124,19 +133,18 @@ public:
         m_lock(&lock)
     {
         ASSERT(m_lock);
-        m_flag = cli_and_save_interrupt_flag();
-        m_lock->lock();
+        m_prev_flags = m_lock->lock();
         m_have_lock = true;
     }
 
     ScopedSpinLock(ScopedSpinLock&& from):
         m_lock(from.m_lock),
-        m_have_lock(from.m_have_lock),
-        m_flag(from.m_flag)
+        m_prev_flags(from.m_prev_flags),
+        m_have_lock(from.m_have_lock)
     {
         from.m_lock = nullptr;
+        from.m_prev_flags = 0;
         from.m_have_lock = false;
-        from.m_flag = false;
     }
 
     ScopedSpinLock(const ScopedSpinLock&) = delete;
@@ -144,8 +152,7 @@ public:
     ~ScopedSpinLock()
     {
         if (m_lock && m_have_lock) {
-            m_lock->unlock();
-            restore_interrupt_flag(m_flag);
+            m_lock->unlock(m_prev_flags);
         }
     }
 
@@ -153,8 +160,7 @@ public:
     {
         ASSERT(m_lock);
         ASSERT(!m_have_lock);
-        m_flag = cli_and_save_interrupt_flag();
-        m_lock->lock();
+        m_prev_flags = m_lock->lock();
         m_have_lock = true;
     }
 
@@ -162,10 +168,9 @@ public:
     {
         ASSERT(m_lock);
         ASSERT(m_have_lock);
-        m_lock->unlock();
+        m_lock->unlock(m_prev_flags);
+        m_prev_flags = 0;
         m_have_lock = false;
-        restore_interrupt_flag(m_flag);
-        m_flag = false;
     }
 
     ALWAYS_INLINE bool have_lock() const
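SpinLock::lock() and RecursiveSpinLock::lock() now enter a critical section themselves and return the saved EFLAGS, which the caller must pass back to unlock(); ScopedSpinLock carries that value in m_prev_flags. A usage sketch (the lock object and function are illustrative, not from the commit):

    static SpinLock<u8> s_example_lock; // hypothetical lock

    void example()
    {
        // Manual form: keep the flags returned by lock() for the matching unlock().
        u32 prev_flags = s_example_lock.lock();
        // ... protected work ...
        s_example_lock.unlock(prev_flags);

        // Preferred RAII form, as used with g_scheduler_lock in Thread.cpp below.
        ScopedSpinLock lock(s_example_lock);
        // ... protected work, released when `lock` goes out of scope ...
    }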
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 9dcb8964b6..532346f472 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -181,7 +181,8 @@ void Thread::die_if_needed()
     if (!m_should_die)
         return;
 
-    unlock_process_if_locked();
+    u32 prev_crit;
+    unlock_process_if_locked(prev_crit);
 
     InterruptDisabler disabler;
     set_state(Thread::State::Dying);
@@ -190,20 +191,26 @@ void Thread::die_if_needed()
 
 void Thread::yield_without_holding_big_lock()
 {
-    bool did_unlock = unlock_process_if_locked();
+    u32 prev_crit;
+    bool did_unlock = unlock_process_if_locked(prev_crit);
     Scheduler::yield();
-    if (did_unlock)
-        relock_process();
+    relock_process(did_unlock, prev_crit);
 }
 
-bool Thread::unlock_process_if_locked()
+bool Thread::unlock_process_if_locked(u32& prev_crit)
 {
+    auto& in_critical = Processor::current().in_critical();
+    prev_crit = in_critical;
+    in_critical = 0;
     return process().big_lock().force_unlock_if_locked();
 }
 
-void Thread::relock_process()
+void Thread::relock_process(bool did_unlock, u32 prev_crit)
 {
-    process().big_lock().lock();
+    if (did_unlock)
+        process().big_lock().lock();
+    ASSERT(!Processor::current().in_critical());
+    Processor::current().in_critical() = prev_crit;
 }
 
 u64 Thread::sleep(u32 ticks)
@@ -328,6 +335,7 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
     dbg() << "Signal: Kernel sent " << signal << " to " << process();
 #endif
 
+    ScopedSpinLock lock(g_scheduler_lock);
     m_pending_signals |= 1 << (signal - 1);
 }
 
@@ -339,13 +347,10 @@
 // the appropriate signal handler.
 void Thread::send_urgent_signal_to_self(u8 signal)
 {
-    // FIXME: because of a bug in dispatch_signal we can't
-    // setup a signal while we are the current thread. Because of
-    // this we use a work-around where we send the signal and then
-    // block, allowing the scheduler to properly dispatch the signal
-    // before the thread is next run.
-    send_signal(signal, &process());
-    (void)block<SemiPermanentBlocker>(SemiPermanentBlocker::Reason::Signal);
+    ASSERT(Thread::current() == this);
+    ScopedSpinLock lock(g_scheduler_lock);
+    if (dispatch_signal(signal) == ShouldUnblockThread::No)
+        Scheduler::yield();
 }
 
 ShouldUnblockThread Thread::dispatch_one_pending_signal()
@@ -443,6 +448,7 @@ static void push_value_on_user_stack(u32* stack, u32 data)
 ShouldUnblockThread Thread::dispatch_signal(u8 signal)
 {
     ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(g_scheduler_lock.is_locked());
     ASSERT(signal > 0 && signal <= 32);
     ASSERT(!process().is_ring0());
 
@@ -540,6 +546,10 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
     u32 ret_eip = state.eip;
     u32 ret_eflags = state.eflags;
 
+#ifdef SIGNAL_DEBUG
+    klog() << "signal: setting up user stack to return to eip: " << String::format("%p", ret_eip) << " esp: " << String::format("%p", old_esp);
+#endif
+
     // Align the stack to 16 bytes.
     // Note that we push 56 bytes (4 * 14) on to the stack,
     // so we need to account for this here.
@@ -698,7 +708,7 @@ bool Thread::is_thread(void* ptr)
 
 void Thread::set_state(State new_state)
 {
-    InterruptDisabler disabler;
+    ScopedSpinLock lock(g_scheduler_lock);
     if (new_state == m_state)
         return;
 
@@ -712,6 +722,9 @@ void Thread::set_state(State new_state)
     }
 
     m_state = new_state;
+#ifdef THREAD_DEBUG
+    dbg() << "Set Thread " << VirtualAddress(this) << " " << *this << " state to " << state_string();
+#endif
     if (m_process.pid() != 0) {
         Scheduler::update_state_for_thread(*this);
     }
@@ -842,11 +855,12 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
 Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary, const char* reason)
 {
     TimerId timer_id {};
+    u32 prev_crit;
     bool did_unlock;
 
     {
         InterruptDisabler disable;
-        did_unlock = unlock_process_if_locked();
+        did_unlock = unlock_process_if_locked(prev_crit);
         if (lock)
             *lock = false;
         set_state(State::Queued);
@@ -870,8 +884,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<b
     sti();
 
     // We've unblocked, relock the process if needed and carry on.
-    if (did_unlock)
-        relock_process();
+    relock_process(did_unlock, prev_crit);
 
     BlockResult result = m_wait_queue_node.is_in_list() ? BlockResult::InterruptedByTimeout : BlockResult::WokeNormally;
 
@@ -884,6 +897,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<b
 
 void Thread::wake_from_queue()
 {
+    ScopedSpinLock lock(g_scheduler_lock);
     ASSERT(state() == State::Queued);
     if (this != Thread::current())
         set_state(State::Runnable);
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 4ad06d49fa..5bac56e195 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -463,8 +463,8 @@ private:
 private:
     friend class SchedulerData;
     friend class WaitQueue;
-    bool unlock_process_if_locked();
-    void relock_process();
+    bool unlock_process_if_locked(u32& prev_crit);
+    void relock_process(bool did_unlock, u32 prev_crit);
     String backtrace_impl() const;
     void reset_fpu_state();
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index b43bd9c0f5..eefa569f3b 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -591,7 +591,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
 {
     ASSERT_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
-    mm_data.m_quickmap_in_use.lock();
+    mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
     ScopedSpinLock lock(s_lock);
 
     u32 pte_idx = 8 + Processor::current().id();
@@ -622,7 +622,7 @@ void MemoryManager::unquickmap_page()
     auto& pte = boot_pd3_pt1023[pte_idx];
     pte.clear();
     flush_tlb(vaddr);
-    mm_data.m_quickmap_in_use.unlock();
+    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }
 
 template<MemoryManager::AccessSpace space, MemoryManager::AccessType access_type>
@@ -736,6 +736,7 @@ void MemoryManager::dump_kernel_regions()
 {
     klog() << "Kernel regions:";
     klog() << "BEGIN END SIZE ACCESS NAME";
+    ScopedSpinLock lock(s_lock);
     for (auto& region : MM.m_kernel_regions) {
         klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_purgeable() ? 'P' : ' ') << " " << region.name().characters();
     }
 }
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index c9e72fd1bc..f176424182 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -69,6 +69,7 @@ class SynthFSInode;
 
 struct MemoryManagerData {
     SpinLock<u8> m_quickmap_in_use;
+    u32 m_quickmap_prev_flags;
 };
 
 class MemoryManager {
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index b5d5d759dd..c5122f64f3 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
 
 void WaitQueue::enqueue(Thread& thread)
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     m_threads.append(thread);
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (lock)
         *lock = false;
     if (m_threads.is_empty())
         return;
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
 
 void WaitQueue::wake_n(i32 wake_count)
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (m_threads.is_empty())
         return;
 
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
 
 void WaitQueue::wake_all()
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (m_threads.is_empty())
         return;
     while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
 
 void WaitQueue::clear()
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     m_threads.clear();
 }
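A design note on the MemoryManager hunks above: quickmap_page() and unquickmap_page() acquire and release the same SpinLock in different functions, so the EFLAGS value returned by lock() has to be parked in the per-processor MemoryManagerData in between; that is what the new m_quickmap_prev_flags member is for. Abridged from the hunks above (unrelated lines omitted):

    u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
    {
        auto& mm_data = get_data();
        // lock() returns the saved EFLAGS; stash it until unquickmap_page() runs.
        mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
        // ... map the page into this CPU's quickmap slot ...
    }

    void MemoryManager::unquickmap_page()
    {
        auto& mm_data = get_data();
        // ... unmap and flush the TLB ...
        mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
    }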