author     Andreas Kling <kling@serenityos.org>  2021-07-05 23:07:18 +0200
committer  Andreas Kling <kling@serenityos.org>  2021-07-05 23:30:15 +0200
commit     565796ae4ef92d57bb40191c64d4512b58eb8ab2 (patch)
tree       a4e5b203ef0e51b1777946bc9ed1751394de85fb /Kernel/Scheduler.cpp
parent     c40780d404b2f65c13a5124c43f2a243da00840c (diff)
download   serenity-565796ae4ef92d57bb40191c64d4512b58eb8ab2.zip
Kernel+LibC: Remove sys$donate()
This was an old SerenityOS-specific syscall for donating the remainder of the calling thread's time-slice to another thread within the same process.

Now that Threading::Lock uses a pthread_mutex_t internally, we no longer need this syscall, which allows us to get rid of a surprising amount of unnecessary scheduler logic. :^)
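For context: the donation path let a thread, typically one waiting on a contended userspace lock, hand the remainder of its time-slice to another thread in the same process, which is why the scheduler tracked a pending beneficiary per processor (see the removed SchedulerPerProcessorData members below). A pthread_mutex_t-backed Threading::Lock makes this unnecessary, because a contending thread simply blocks in the kernel until the holder unlocks. Here is a minimal illustrative sketch of such a mutex-backed lock; it is not the actual Threading::Lock implementation, and the class name is made up:

    // Illustrative only: a lock backed by pthread_mutex_t. A contending thread
    // blocks inside pthread_mutex_lock() until the holder calls
    // pthread_mutex_unlock(), so no time-slice donation syscall is needed.
    #include <pthread.h>

    class MutexBackedLock {
    public:
        MutexBackedLock() { pthread_mutex_init(&m_mutex, nullptr); }
        ~MutexBackedLock() { pthread_mutex_destroy(&m_mutex); }

        void lock() { pthread_mutex_lock(&m_mutex); }
        void unlock() { pthread_mutex_unlock(&m_mutex); }

    private:
        pthread_mutex_t m_mutex;
    };

With blocking handled by the mutex, Scheduler::donate_to(), Scheduler::donate_to_and_switch(), and the pending-beneficiary bookkeeping removed in the diff below no longer have a purpose.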
Diffstat (limited to 'Kernel/Scheduler.cpp')
-rw-r--r--  Kernel/Scheduler.cpp  87
1 file changed, 0 insertions, 87 deletions
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 850553b157..7d2e072588 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -31,8 +31,6 @@ class SchedulerPerProcessorData {
public:
SchedulerPerProcessorData() = default;
- WeakPtr<Thread> m_pending_beneficiary;
- const char* m_pending_donate_reason { nullptr };
bool m_in_scheduler { true };
};
@@ -206,26 +204,6 @@ bool Scheduler::pick_next()
dump_thread_list();
}
- auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
- if (pending_beneficiary && dequeue_runnable_thread(*pending_beneficiary, true)) {
- // The thread we're supposed to donate to still exists and we can
- const char* reason = scheduler_data.m_pending_donate_reason;
- scheduler_data.m_pending_beneficiary = nullptr;
- scheduler_data.m_pending_donate_reason = nullptr;
-
- // We need to leave our first critical section before switching context,
- // but since we're still holding the scheduler lock we're still in a critical section
- critical.leave();
-
- dbgln_if(SCHEDULER_DEBUG, "Processing pending donate to {} reason={}", *pending_beneficiary, reason);
- return donate_to_and_switch(pending_beneficiary.ptr(), reason);
- }
-
- // Either we're not donating or the beneficiary disappeared.
- // Either way clear any pending information
- scheduler_data.m_pending_beneficiary = nullptr;
- scheduler_data.m_pending_donate_reason = nullptr;
-
auto& thread_to_schedule = pull_next_runnable_thread();
if constexpr (SCHEDULER_DEBUG) {
#if ARCH(I386)
@@ -250,11 +228,6 @@ bool Scheduler::yield()
{
InterruptDisabler disabler;
auto& proc = Processor::current();
- auto& scheduler_data = proc.get_scheduler_data();
-
- // Clear any pending beneficiary
- scheduler_data.m_pending_beneficiary = nullptr;
- scheduler_data.m_pending_donate_reason = nullptr;
auto current_thread = Thread::current();
dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
@@ -275,66 +248,6 @@ bool Scheduler::yield()
return true;
}
-bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const char* reason)
-{
- VERIFY(g_scheduler_lock.own_lock());
-
- auto& proc = Processor::current();
- VERIFY(proc.in_critical() == 1);
-
- unsigned ticks_left = Thread::current()->ticks_left();
- if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
- return Scheduler::yield();
-
- unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
- dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Donating {} ticks to {}, reason={}", proc.get_id(), ticks_to_donate, *beneficiary, reason);
- beneficiary->set_ticks_left(ticks_to_donate);
-
- return Scheduler::context_switch(beneficiary);
-}
-
-bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
-{
- VERIFY(beneficiary);
-
- if (beneficiary == Thread::current())
- return Scheduler::yield();
-
- // Set the m_in_scheduler flag before acquiring the spinlock. This
- // prevents a recursive call into Scheduler::invoke_async upon
- // leaving the scheduler lock.
- ScopedCritical critical;
- auto& proc = Processor::current();
- auto& scheduler_data = proc.get_scheduler_data();
- scheduler_data.m_in_scheduler = true;
- ScopeGuard guard(
- []() {
- // We may be on a different processor after we got switched
- // back to this thread!
- auto& scheduler_data = Processor::current().get_scheduler_data();
- VERIFY(scheduler_data.m_in_scheduler);
- scheduler_data.m_in_scheduler = false;
- });
-
- VERIFY(!proc.in_irq());
-
- if (proc.in_critical() > 1) {
- scheduler_data.m_pending_beneficiary = beneficiary; // Save the beneficiary
- scheduler_data.m_pending_donate_reason = reason;
- proc.invoke_scheduler_async();
- return false;
- }
-
- ScopedSpinLock lock(g_scheduler_lock);
-
- // "Leave" the critical section before switching context. Since we
- // still hold the scheduler lock, we're not actually leaving it.
- // Processor::switch_context expects Processor::in_critical() to be 1
- critical.leave();
- donate_to_and_switch(beneficiary, reason);
- return false;
-}
-
bool Scheduler::context_switch(Thread* thread)
{
if (s_mm_lock.own_lock()) {