author     Gunnar Beutner <gbeutner@serenityos.org>    2021-05-20 01:30:36 +0200
committer  Andreas Kling <kling@serenityos.org>        2021-05-20 09:09:10 +0200
commit     cac7a8ced9437a3187ab8d63dad42219c3f5e8b0 (patch)
tree       42d6908d5d321c98049135004ad1edcf46df8520 /Kernel
parent     7557f2db905eaf768b54be854d9bf1ce32f0973d (diff)
Kernel: Use the Function class for deferred_call_queue()
This avoids allocations for deferred_call_queue().
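
With this change a caller hands deferred_call_queue() any callable, and the resulting Function<void()> is moved into the per-processor DeferredCallEntry instead of being copied into a separately new'ed Callback object as the old template wrapper did. A minimal usage sketch follows; the helper name, the message argument, and the dbgln() call are illustrative only and not part of this commit, and whether a given capture avoids a heap allocation depends on it fitting in AK::Function's inline storage.

#include <AK/Format.h>
#include <AK/Function.h>
#include <Kernel/Arch/x86/CPU.h>

// Hypothetical helper showing the new calling convention.
static void log_deferred_message(const char* message)
{
    // The lambda becomes a Function<void()> that is moved straight into the
    // DeferredCallEntry's inline handler storage; deferred_call_queue() itself
    // no longer allocates a separate callback object.
    Processor::deferred_call_queue([message] {
        dbgln("deferred: {}", message);
    });
}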
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Arch/i386/CPU.cpp  39
-rw-r--r--  Kernel/Arch/x86/CPU.h     41
2 files changed, 25 insertions(+), 55 deletions(-)
diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index 984a69dd05..3f45f124b1 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -2234,6 +2234,7 @@ UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
     for (size_t i = 0; i < pool_count; i++) {
         auto& entry = m_deferred_call_pool[i];
         entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
+        new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
         entry.was_allocated = false;
     }
     m_pending_deferred_calls = nullptr;
@@ -2245,6 +2246,8 @@ void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
     VERIFY(m_in_critical);
     VERIFY(!entry->was_allocated);
 
+    entry->handler_value() = {};
+
     entry->next = m_free_deferred_call_pool_entry;
     m_free_deferred_call_pool_entry = entry;
 }
@@ -2262,6 +2265,7 @@ DeferredCallEntry* Processor::deferred_call_get_free()
     }
 
     auto* entry = new DeferredCallEntry;
+    new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
     entry->was_allocated = true;
     return entry;
 }
@@ -2290,20 +2294,14 @@ void Processor::deferred_call_execute_pending()
     pending_list = reverse_list(pending_list);
 
     do {
-        // Call the appropriate callback handler
-        if (pending_list->have_data) {
-            pending_list->callback_with_data.handler(pending_list->callback_with_data.data);
-            if (pending_list->callback_with_data.free)
-                pending_list->callback_with_data.free(pending_list->callback_with_data.data);
-        } else {
-            pending_list->callback.handler();
-        }
+        pending_list->invoke_handler();
 
         // Return the entry back to the pool, or free it
         auto* next = pending_list->next;
-        if (pending_list->was_allocated)
+        if (pending_list->was_allocated) {
+            pending_list->handler_value().~Function();
             delete pending_list;
-        else
+        } else
             deferred_call_return_to_pool(pending_list);
         pending_list = next;
     } while (pending_list);
@@ -2316,21 +2314,7 @@ void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
     m_pending_deferred_calls = entry;
 }
 
-void Processor::deferred_call_queue(void (*callback)())
-{
-    // NOTE: If we are called outside of a critical section and outside
-    // of an irq handler, the function will be executed before we return!
-    ScopedCritical critical;
-    auto& cur_proc = Processor::current();
-
-    auto* entry = cur_proc.deferred_call_get_free();
-    entry->have_data = false;
-    entry->callback.handler = callback;
-
-    cur_proc.deferred_call_queue_entry(entry);
-}
-
-void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
+void Processor::deferred_call_queue(Function<void()> callback)
 {
     // NOTE: If we are called outside of a critical section and outside
     // of an irq handler, the function will be executed before we return!
@@ -2338,10 +2322,7 @@ void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
     auto& cur_proc = Processor::current();
 
     auto* entry = cur_proc.deferred_call_get_free();
-    entry->have_data = true;
-    entry->callback_with_data.handler = callback;
-    entry->callback_with_data.data = data;
-    entry->callback_with_data.free = free_data;
+    entry->handler_value() = move(callback);
 
     cur_proc.deferred_call_queue_entry(entry);
 }
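
Because handler_storage is just raw aligned bytes, the CPU.cpp side of this change manages the Function's lifetime by hand: deferred_call_pool_init() and deferred_call_get_free() placement-new it, deferred_call_return_to_pool() resets it with `= {}` so captured state is released, and heap-allocated entries get an explicit ~Function() before delete. The following standalone sketch reproduces that protocol with std::function instead of AK::Function so it compiles outside the kernel; Entry and its members are illustrative stand-ins, not kernel code.

#include <cstdio>
#include <functional>
#include <new>

// Illustrative stand-in for DeferredCallEntry.
struct Entry {
    using Handler = std::function<void()>;

    // Raw, correctly aligned bytes; the Handler living here is constructed,
    // reset and destroyed manually, mirroring handler_storage in the commit.
    alignas(Handler) unsigned char handler_storage[sizeof(Handler)];

    Handler& handler() { return *std::launder(reinterpret_cast<Handler*>(handler_storage)); }
};

int main()
{
    using Handler = Entry::Handler;

    Entry entry;
    new (entry.handler_storage) Handler; // pool init: construct the slot once

    int counter = 0;
    entry.handler() = [&counter] { counter += 1; }; // queue: move the callback in
    entry.handler()();                              // execute the pending deferred call
    entry.handler() = {};                           // return to pool: release captured state

    entry.handler().~Handler(); // heap-entry path: destroy explicitly before freeing the memory
    std::printf("counter = %d\n", counter);
}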
diff --git a/Kernel/Arch/x86/CPU.h b/Kernel/Arch/x86/CPU.h
index 060821cba7..4929ccba62 100644
--- a/Kernel/Arch/x86/CPU.h
+++ b/Kernel/Arch/x86/CPU.h
@@ -9,6 +9,7 @@
 #include <AK/Atomic.h>
 #include <AK/Badge.h>
 #include <AK/Concepts.h>
+#include <AK/Function.h>
 #include <AK/Noncopyable.h>
 #include <AK/Vector.h>
@@ -608,19 +609,21 @@ struct ProcessorMessageEntry {
 };
 
 struct DeferredCallEntry {
+    using HandlerFunction = Function<void()>;
+
     DeferredCallEntry* next;
-    union {
-        struct {
-            void (*handler)();
-        } callback;
-        struct {
-            void* data;
-            void (*handler)(void*);
-            void (*free)(void*);
-        } callback_with_data;
-    };
-    bool have_data;
+    alignas(HandlerFunction) u8 handler_storage[sizeof(HandlerFunction)];
     bool was_allocated;
+
+    HandlerFunction& handler_value()
+    {
+        return *bit_cast<HandlerFunction*>(&handler_storage);
+    }
+
+    void invoke_handler()
+    {
+        handler_value()();
+    }
 };
 
 class Processor;
@@ -975,21 +978,7 @@ public:
     static void smp_broadcast_flush_tlb(const PageDirectory*, VirtualAddress, size_t);
     static u32 smp_wake_n_idle_processors(u32 wake_count);
 
-    template<typename Callback>
-    static void deferred_call_queue(Callback callback)
-    {
-        auto* data = new Callback(move(callback));
-        deferred_call_queue(
-            [](void* data) {
-                (*reinterpret_cast<Callback*>(data))();
-            },
-            data,
-            [](void* data) {
-                delete reinterpret_cast<Callback*>(data);
-            });
-    }
-    static void deferred_call_queue(void (*callback)());
-    static void deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*));
+    static void deferred_call_queue(Function<void()> callback);
 
     ALWAYS_INLINE bool has_feature(CPUFeature f) const
     {
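
For contrast, the template wrapper removed from CPU.h above type-erased the callback through a void* plus trampoline and deleter function pointers, which is exactly where the per-call `new Callback(...)` came from. A condensed standalone rendering of that removed pattern follows; it is generic C++, queue() and queue_erased() are hypothetical names, and the sketch runs the callback immediately rather than deferring it.

#include <utility>

using ErasedFn = void (*)(void*);

// Stand-in for the old function-pointer overload of deferred_call_queue().
static void queue_erased(ErasedFn call, void* data, ErasedFn free_data)
{
    // A real implementation would stash these in a queue entry; the sketch
    // just invokes and frees immediately.
    call(data);
    free_data(data);
}

// Stand-in for the removed template wrapper: one heap allocation per call.
template<typename Callback>
static void queue(Callback callback)
{
    auto* data = new Callback(std::move(callback)); // the allocation this commit eliminates
    queue_erased(
        [](void* p) { (*static_cast<Callback*>(p))(); },
        data,
        [](void* p) { delete static_cast<Callback*>(p); });
}

int main()
{
    int hits = 0;
    queue([&hits] { ++hits; });
    return hits == 1 ? 0 : 1;
}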