author      Tom <tomut@yahoo.com>                      2020-11-01 13:08:25 -0700
committer   Andreas Kling <kling@serenityos.org>       2020-11-04 21:21:37 +0100
commit      b9a97ff81fbcadacf001472a0f71306d67d56eb8
tree        f4999066ce3d3e0c6315bb36e764027aec2d93aa /Kernel
parent      4713e6185f8719cc0b4f404d82543cd65cd560a4
Kernel: Add mechanism to queue deferred function calls
Function calls that are deferred will be executed when the thread
re-enters a preemptable state, i.e. when it leaves its outermost
critical section or returns from an IRQ handler. If the thread is
already in a preemptable state when the call is queued, the function
is executed immediately.
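To make the two cases concrete, here is a minimal sketch; it assumes kernel
code that can reach the new Processor API, and example_queueing() is an
invented name used only for illustration:

    // Sketch only: example_queueing() is hypothetical; ScopedCritical and
    // Processor::deferred_call_queue() are the kernel primitives used here.
    void example_queueing()
    {
        {
            ScopedCritical critical;
            Processor::deferred_call_queue([] {
                // Queued from inside a critical section: deferred until the
                // outermost critical section is left.
            });
        } // <- the queued call executes here, as the critical section ends

        Processor::deferred_call_queue([] {
            // Queued while already preemptable (no critical section, no IRQ):
            // executes before deferred_call_queue() returns.
        });
    }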
This is meant to be used from e.g. IRQ handlers, where we might want
to wake a thread that is blocked until an interrupt happens.
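For that interrupt case, a driver's handler might look roughly like the
following; MyDevice, ack_interrupt() and m_irq_queue are hypothetical names,
and only Processor::deferred_call_queue() comes from this patch:

    // Hypothetical driver sketch: defer the wake-up out of IRQ context.
    void MyDevice::handle_irq(const RegisterState&)
    {
        ack_interrupt(); // device-specific acknowledgement (invented helper)

        // Waking the waiter directly from the IRQ handler may not be safe,
        // so queue it; the callback runs once the handler has unwound and
        // the processor is about to become preemptable again.
        Processor::deferred_call_queue([this] {
            m_irq_queue.wake_all(); // hypothetical WaitQueue member
        });
    }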
Diffstat (limited to 'Kernel')
-rw-r--r--   Kernel/Arch/i386/CPU.cpp   121
-rw-r--r--   Kernel/Arch/i386/CPU.h      66
2 files changed, 185 insertions, 2 deletions
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 1ee3884a9e..bb82d7fb5e 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -1193,6 +1193,8 @@ void Processor::early_initialize(u32 cpu)
         atomic_fetch_add(&g_total_processors, 1u, AK::MemoryOrder::memory_order_acq_rel);
     }

+    deferred_call_pool_init();
+
     cpu_setup();
     gdt_init();
     ASSERT(&current() == this); // sanity check
@@ -1932,6 +1934,125 @@ void Processor::Processor::halt()
     halt_this();
 }

+void Processor::deferred_call_pool_init()
+{
+    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
+    for (size_t i = 0; i < pool_count; i++) {
+        auto& entry = m_deferred_call_pool[i];
+        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
+        entry.was_allocated = false;
+    }
+    m_pending_deferred_calls = nullptr;
+    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
+}
+
+void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
+{
+    ASSERT(m_in_critical);
+    ASSERT(!entry->was_allocated);
+
+    entry->next = m_free_deferred_call_pool_entry;
+    m_free_deferred_call_pool_entry = entry;
+}
+
+DeferredCallEntry* Processor::deferred_call_get_free()
+{
+    ASSERT(m_in_critical);
+
+    if (m_free_deferred_call_pool_entry) {
+        // Fast path, we have an entry in our pool
+        auto* entry = m_free_deferred_call_pool_entry;
+        m_free_deferred_call_pool_entry = entry->next;
+        ASSERT(!entry->was_allocated);
+        return entry;
+    }
+
+    auto* entry = new DeferredCallEntry;
+    entry->was_allocated = true;
+    return entry;
+}
+
+void Processor::deferred_call_execute_pending()
+{
+    ASSERT(m_in_critical);
+
+    if (!m_pending_deferred_calls)
+        return;
+    auto* pending_list = m_pending_deferred_calls;
+    m_pending_deferred_calls = nullptr;
+
+    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
+    auto reverse_list =
+        [](DeferredCallEntry* list) -> DeferredCallEntry*
+        {
+            DeferredCallEntry* rev_list = nullptr;
+            while (list) {
+                auto next = list->next;
+                list->next = rev_list;
+                rev_list = list;
+                list = next;
+            }
+            return rev_list;
+        };
+    pending_list = reverse_list(pending_list);
+
+    do {
+        // Call the appropriate callback handler
+        if (pending_list->have_data) {
+            pending_list->callback_with_data.handler(pending_list->callback_with_data.data);
+            if (pending_list->callback_with_data.free)
+                pending_list->callback_with_data.free(pending_list->callback_with_data.data);
+        } else {
+            pending_list->callback.handler();
+        }
+
+        // Return the entry back to the pool, or free it
+        auto* next = pending_list->next;
+        if (pending_list->was_allocated)
+            delete pending_list;
+        else
+            deferred_call_return_to_pool(pending_list);
+        pending_list = next;
+    } while (pending_list);
+}
+
+void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
+{
+    ASSERT(m_in_critical);
+    entry->next = m_pending_deferred_calls;
+    m_pending_deferred_calls = entry;
+}
+
+void Processor::deferred_call_queue(void (*callback)())
+{
+    // NOTE: If we are called outside of a critical section and outside
+    // of an irq handler, the function will be executed before we return!
+    ScopedCritical critical;
+    auto& cur_proc = Processor::current();
+
+    auto* entry = cur_proc.deferred_call_get_free();
+    entry->have_data = false;
+    entry->callback.handler = callback;
+
+    cur_proc.deferred_call_queue_entry(entry);
+}
+
+void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
+{
+    // NOTE: If we are called outside of a critical section and outside
+    // of an irq handler, the function will be executed before we return!
+    ScopedCritical critical;
+    auto& cur_proc = Processor::current();
+
+    auto* entry = cur_proc.deferred_call_get_free();
+    entry->have_data = true;
+    entry->callback_with_data.handler = callback;
+    entry->callback_with_data.data = data;
+    entry->callback_with_data.free = free_data;
+
+    cur_proc.deferred_call_queue_entry(entry);
+}
+
 void Processor::gdt_init()
 {
     m_gdt_length = 0;
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index e740370305..ab8d551b6c 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -677,6 +677,22 @@ struct ProcessorMessageEntry {
     ProcessorMessage* msg;
 };

+struct DeferredCallEntry {
+    DeferredCallEntry* next;
+    union {
+        struct {
+            void (*handler)();
+        } callback;
+        struct {
+            void* data;
+            void (*handler)(void*);
+            void (*free)(void*);
+        } callback_with_data;
+    };
+    bool have_data;
+    bool was_allocated;
+};
+
 class Processor {
     friend class ProcessorInfo;

@@ -710,6 +726,10 @@ class Processor {
     bool m_scheduler_initialized;
     bool m_halt_requested;

+    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
+    DeferredCallEntry* m_free_deferred_call_pool_entry;
+    DeferredCallEntry m_deferred_call_pool[5];
+
     void gdt_init();
     void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
     void write_gdt_entry(u16 selector, Descriptor& descriptor);
@@ -722,6 +742,12 @@ class Processor {
     static void smp_broadcast_message(ProcessorMessage& msg, bool async);
     static void smp_broadcast_halt();

+    void deferred_call_pool_init();
+    void deferred_call_execute_pending();
+    DeferredCallEntry* deferred_call_get_free();
+    void deferred_call_return_to_pool(DeferredCallEntry*);
+    void deferred_call_queue_entry(DeferredCallEntry*);
+
     void cpu_detect();
     void cpu_setup();

@@ -843,7 +869,19 @@ public:
     ALWAYS_INLINE void restore_irq(u32 prev_irq)
     {
         ASSERT(prev_irq <= m_in_irq);
-        m_in_irq = prev_irq;
+        if (!prev_irq) {
+            if (m_in_critical == 0) {
+                auto prev_critical = m_in_critical++;
+                m_in_irq = prev_irq;
+                deferred_call_execute_pending();
+                ASSERT(m_in_critical == prev_critical + 1);
+                m_in_critical = prev_critical;
+            }
+            if (!m_in_critical)
+                check_invoke_scheduler();
+        } else {
+            m_in_irq = prev_irq;
+        }
     }

     ALWAYS_INLINE u32& in_irq()
@@ -860,10 +898,18 @@ public:

     ALWAYS_INLINE void leave_critical(u32 prev_flags)
     {
+        cli(); // Need to prevent IRQs from interrupting us here!
         ASSERT(m_in_critical > 0);
-        if (--m_in_critical == 0) {
+        if (m_in_critical == 1) {
+            if (!m_in_irq) {
+                deferred_call_execute_pending();
+                ASSERT(m_in_critical == 1);
+            }
+            m_in_critical--;
             if (!m_in_irq)
                 check_invoke_scheduler();
+        } else {
+            m_in_critical--;
         }
         if (prev_flags & 0x200)
             sti();
@@ -921,6 +967,22 @@ public:
     static void smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
     static void smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count);

+    template<typename Callback>
+    static void deferred_call_queue(Callback callback)
+    {
+        auto* data = new Callback(move(callback));
+        deferred_call_queue(
+            [](void* data) {
+                (*reinterpret_cast<Callback*>(data))();
+            },
+            data,
+            [](void* data) {
+                delete reinterpret_cast<Callback*>(data);
+            });
+    }
+    static void deferred_call_queue(void (*callback)());
+    static void deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*));
+
     ALWAYS_INLINE bool has_feature(CPUFeature f) const
     {
         return (static_cast<u32>(m_features) & static_cast<u32>(f)) != 0;
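A couple of details worth noting in the new code: entries are pushed onto
m_pending_deferred_calls in LIFO order and deferred_call_execute_pending()
reverses the list before running it, so callbacks execute in the order they
were queued. Each Processor keeps a small fixed pool (m_deferred_call_pool[5])
and falls back to heap-allocating DeferredCallEntry objects once the pool is
exhausted, while the capturing template overload of deferred_call_queue()
always heap-allocates the callback object itself and frees it after
invocation. A rough sketch of the ordering guarantee, with the surrounding
scope being hypothetical:

    {
        ScopedCritical critical;
        Processor::deferred_call_queue([] { /* runs first */ });
        Processor::deferred_call_queue([] { /* runs second */ });
    } // both callbacks run here, in the order they were queued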