author      Tom <tomut@yahoo.com>                    2020-11-30 19:04:36 -0700
committer   Andreas Kling <kling@serenityos.org>    2020-12-01 09:48:34 +0100
commit      78f1b5e359f1436f90d1c54929be30c0e41d8df0 (patch)
tree        439c4da3f7c26779775ebb2a2150141f67330d53 /Kernel
parent      9e32d79e02c84c0130c9fe1b143b445ac9093079 (diff)
Kernel: Fix some problems with Thread::wait_on and Lock
This changes the Thread::wait_on function to not enable interrupts
upon leaving, which caused some problems with page fault handlers
and in other situations. It may now be called from critical
sections, with interrupts enabled or disabled, and returns to the
same state.

This also requires some fixes to Lock. To aid debugging, a new
define LOCK_DEBUG is added that enables checking for Lock leaks
upon finalization of a Thread.
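To illustrate the new contract, here is a minimal hypothetical caller (not part of this commit; the argument list mirrors the wait_on call site in Lock::lock below):

namespace Kernel {

// Sketch: wait_on() may now be entered from a critical section, with
// interrupts enabled or disabled, and returns with that exact state.
void example_wait(WaitQueue& queue)
{
    ScopedCritical critical; // enter a critical section
    // Same parameter order as Lock::lock()'s call: reason, timeout,
    // lock flag, beholder; we pass none of the optional ones here.
    auto result = Thread::current()->wait_on(queue, "Example", nullptr, nullptr, nullptr);
    // Before this commit, every exit path ran sti(), force-enabling
    // interrupts here even for callers that had them disabled.
    (void)result;
    // Leaving scope restores the previous critical-section depth.
}

}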
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Arch/i386/CPU.h  |   8
-rw-r--r--  Kernel/Forward.h        |   1
-rw-r--r--  Kernel/Lock.cpp         | 122
-rw-r--r--  Kernel/Lock.h           |  17
-rw-r--r--  Kernel/Scheduler.cpp    |  15
-rw-r--r--  Kernel/Scheduler.h      |   1
-rw-r--r--  Kernel/Thread.cpp       |  24
-rw-r--r--  Kernel/Thread.h         |  58
-rw-r--r--  Kernel/VM/Region.cpp    |   7
9 files changed, 174 insertions(+), 79 deletions(-)
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 9ae0f3941b..9f6a6b9d1c 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -1052,14 +1052,6 @@ public:
return *this;
}
- void set_interrupt_flag_on_destruction(bool flag)
- {
- if (flag)
- m_prev_flags |= 0x200;
- else
- m_prev_flags &= ~0x200;
- }
-
void leave()
{
ASSERT(m_valid);
diff --git a/Kernel/Forward.h b/Kernel/Forward.h
index 8711757201..8dc4241e0d 100644
--- a/Kernel/Forward.h
+++ b/Kernel/Forward.h
@@ -44,6 +44,7 @@ class InodeWatcher;
class KBuffer;
class KResult;
class LocalSocket;
+class Lock;
class MappedROM;
class MasterPTY;
class PageDirectory;
diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp
index 870fe9b86d..e7c523d571 100644
--- a/Kernel/Lock.cpp
+++ b/Kernel/Lock.cpp
@@ -27,78 +27,87 @@
#include <AK/TemporaryChange.h>
#include <Kernel/KSyms.h>
#include <Kernel/Lock.h>
-#include <Kernel/Thread.h>
namespace Kernel {
-static bool modes_conflict(Lock::Mode mode1, Lock::Mode mode2)
+#ifdef LOCK_DEBUG
+void Lock::lock(Mode mode)
{
- if (mode1 == Lock::Mode::Unlocked || mode2 == Lock::Mode::Unlocked)
- return false;
-
- if (mode1 == Lock::Mode::Shared && mode2 == Lock::Mode::Shared)
- return false;
-
- return true;
+ lock("unknown", 0, mode);
}
+void Lock::lock(const char* file, int line, Mode mode)
+#else
void Lock::lock(Mode mode)
+#endif
{
+ // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+ // and also from within critical sections!
+ ASSERT(!Processor::current().in_irq());
ASSERT(mode != Mode::Unlocked);
- if (!are_interrupts_enabled()) {
- klog() << "Interrupts disabled when trying to take Lock{" << m_name << "}";
- dump_backtrace();
- Processor::halt();
- }
auto current_thread = Thread::current();
+ ScopedCritical critical;
for (;;) {
if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
do {
// FIXME: Do not add new readers if writers are queued.
- bool modes_dont_conflict = !modes_conflict(m_mode, mode);
- bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
- if (modes_dont_conflict || already_hold_exclusive_lock) {
+ bool can_lock;
+ switch (m_mode) {
+ case Lock::Mode::Unlocked:
+ can_lock = true;
+ break;
+ case Lock::Mode::Shared:
+ can_lock = (mode == Lock::Mode::Shared);
+ break;
+ case Lock::Mode::Exclusive:
+ can_lock = (m_holder == current_thread);
+ break;
+ }
+ if (can_lock) {
// We got the lock!
- if (!already_hold_exclusive_lock)
+ if (m_mode == Lock::Mode::Unlocked) {
m_mode = mode;
- m_holder = current_thread;
+ ASSERT(m_times_locked == 0);
+ if (mode == Mode::Exclusive)
+ m_holder = current_thread;
+ }
+#ifdef LOCK_DEBUG
+ current_thread->holding_lock(*this, true, file, line);
+#endif
m_times_locked++;
m_lock.store(false, AK::memory_order_release);
return;
}
} while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
- } else if (Processor::current().in_critical()) {
- // If we're in a critical section and trying to lock, no context
- // switch will happen, so yield.
- // The assumption is that if we call this from a critical section
- // that we DO want to temporarily leave it
- u32 prev_flags;
- u32 prev_crit = Processor::current().clear_critical(prev_flags, !Processor::current().in_irq());
-
- Scheduler::yield();
-
- // Note, we may now be on a different CPU!
- Processor::current().restore_critical(prev_crit, prev_flags);
} else {
- // We need to process e.g. smp messages
- Processor::wait_check();
+ // I don't know *who* is using "m_lock", so just yield.
+ Scheduler::yield_from_critical();
}
}
}
void Lock::unlock()
{
+ // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+ // and also from within critical sections!
+ ASSERT(!Processor::current().in_irq());
auto current_thread = Thread::current();
+ ScopedCritical critical;
for (;;) {
if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
ASSERT(m_times_locked);
--m_times_locked;
ASSERT(m_mode != Mode::Unlocked);
- if (m_mode == Mode::Exclusive)
+
+ if (m_mode == Mode::Exclusive) {
ASSERT(m_holder == current_thread);
- if (m_holder == current_thread && (m_mode == Mode::Shared || m_times_locked == 0))
- m_holder = nullptr;
+ if (m_times_locked == 0)
+ m_holder = nullptr;
+ }
+#ifdef LOCK_DEBUG
+ current_thread->holding_lock(*this, false);
+#endif
if (m_times_locked > 0) {
m_lock.store(false, AK::memory_order_release);
@@ -109,29 +118,36 @@ void Lock::unlock()
return;
}
// I don't know *who* is using "m_lock", so just yield.
- // The assumption is that if we call this from a critical section
- // that we DO want to temporarily leave it
- u32 prev_flags;
- u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
-
- Scheduler::yield();
-
- // Note, we may now be on a different CPU!
- Processor::current().restore_critical(prev_crit, prev_flags);
+ Scheduler::yield_from_critical();
}
}
bool Lock::force_unlock_if_locked()
{
- ASSERT(m_mode != Mode::Shared);
+ // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+ // and also from within critical sections!
+ ASSERT(!Processor::current().in_irq());
ScopedCritical critical;
- if (m_holder != Thread::current())
- return false;
- ASSERT(m_times_locked == 1);
- m_holder = nullptr;
- m_mode = Mode::Unlocked;
- m_times_locked = 0;
- m_queue.wake_one();
+ for (;;) {
+ if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
+ if (m_holder != Thread::current()) {
+ m_lock.store(false, AK::MemoryOrder::memory_order_release);
+ return false;
+ }
+ ASSERT(m_mode != Mode::Shared);
+ ASSERT(m_times_locked == 1);
+#ifdef LOCK_DEBUG
+ m_holder->holding_lock(*this, false);
+#endif
+ m_holder = nullptr;
+ m_mode = Mode::Unlocked;
+ m_times_locked = 0;
+ m_queue.wake_one(&m_lock);
+ break;
+ }
+ // I don't know *who* is using "m_lock", so just yield.
+ Scheduler::yield_from_critical();
+ }
return true;
}
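The modes_conflict() helper is gone; its rule now lives in the can_lock switch above. Restated as a standalone sketch (helper name hypothetical, logic copied from the switch):

static bool can_acquire(Lock::Mode current, Lock::Mode requested, const Thread* holder, const Thread* current_thread)
{
    switch (current) {
    case Lock::Mode::Unlocked:
        return true;                            // free for anyone, any mode
    case Lock::Mode::Shared:
        return requested == Lock::Mode::Shared; // additional readers may join
    case Lock::Mode::Exclusive:
        return holder == current_thread;        // only recursive re-acquisition
    }
    return false;
}

Note that an exclusive holder may re-enter regardless of the requested mode: m_mode and m_holder are assigned only on the Unlocked-to-locked transition, so recursive acquisition just bumps m_times_locked.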
diff --git a/Kernel/Lock.h b/Kernel/Lock.h
index 5d27ff936f..81971551be 100644
--- a/Kernel/Lock.h
+++ b/Kernel/Lock.h
@@ -31,6 +31,7 @@
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
+#include <Kernel/Thread.h>
#include <Kernel/WaitQueue.h>
namespace Kernel {
@@ -50,6 +51,9 @@ public:
};
void lock(Mode = Mode::Exclusive);
+#ifdef LOCK_DEBUG
+ void lock(const char* file, int line, Mode mode = Mode::Exclusive);
+#endif
void unlock();
bool force_unlock_if_locked();
bool is_locked() const { return m_holder; }
@@ -77,6 +81,13 @@ private:
class Locker {
public:
+#ifdef LOCK_DEBUG
+ ALWAYS_INLINE explicit Locker(const char* file, int line, Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
+ : m_lock(l)
+ {
+ m_lock.lock(file, line, mode);
+ }
+#endif
ALWAYS_INLINE explicit Locker(Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
: m_lock(l)
{
@@ -90,7 +101,11 @@ private:
Lock& m_lock;
};
-#define LOCKER(...) Locker locker(__VA_ARGS__)
+#ifdef LOCK_DEBUG
+# define LOCKER(...) Locker locker(__FILE__, __LINE__, __VA_ARGS__)
+#else
+# define LOCKER(...) Locker locker(__VA_ARGS__)
+#endif
template<typename T>
class Lockable {
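Call sites are unaffected by the LOCK_DEBUG plumbing; the LOCKER macro picks up the call site automatically. A minimal usage sketch (lock name hypothetical, assuming Lock's name-taking constructor):

static Lock s_example_lock { "ExampleLock" };

void do_work()
{
    // With LOCK_DEBUG:    expands to Locker locker(__FILE__, __LINE__, s_example_lock);
    // Without LOCK_DEBUG: expands to Locker locker(s_example_lock);
    LOCKER(s_example_lock);
    // While held, the owning Thread records this file:line and a count.
}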
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 20ef1e6810..759732b873 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -511,6 +511,21 @@ void Scheduler::invoke_async()
pick_next();
}
+void Scheduler::yield_from_critical()
+{
+ auto& proc = Processor::current();
+ ASSERT(proc.in_critical());
+ ASSERT(!proc.in_irq());
+
+ yield(); // Flag a context switch
+
+ u32 prev_flags;
+ u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
+
+ // Note, we may now be on a different CPU!
+ Processor::current().restore_critical(prev_crit, prev_flags);
+}
+
void Scheduler::notify_finalizer()
{
if (g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel) == false)
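yield_from_critical() centralizes the open-coded pattern deleted from Lock.cpp above, and the ordering is the point: yield() only flags a context switch, and clear_critical(prev_flags, false) then drops the critical section without enabling interrupts, letting the flagged switch be serviced before the section is re-entered. A sketch of the intended call pattern (condensed from Lock::lock above):

ScopedCritical critical;
while (m_lock.exchange(true, AK::memory_order_acq_rel) != false) {
    // No context switch can happen while in_critical(), so briefly
    // step out; note we may resume on a different CPU.
    Scheduler::yield_from_critical();
}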
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index c26fe3ac7e..55a05ddc6c 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -57,6 +57,7 @@ public:
[[noreturn]] static void start();
static bool pick_next();
static bool yield();
+ static void yield_from_critical();
static bool donate_to_and_switch(Thread*, const char* reason);
static bool donate_to(RefPtr<Thread>&, const char* reason);
static bool context_switch(Thread*);
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index e5e2db25e6..512bb4c71e 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -316,7 +316,16 @@ void Thread::finalize()
ASSERT(Thread::current() == g_finalizer);
ASSERT(Thread::current() != this);
+#ifdef LOCK_DEBUG
ASSERT(!m_lock.own_lock());
+ if (lock_count() > 0) {
+ dbg() << "Thread " << *this << " leaking " << lock_count() << " Locks!";
+ ScopedSpinLock list_lock(m_holding_locks_lock);
+ for (auto& info : m_holding_locks_list)
+ dbg() << " - " << info.lock->name() << " @ " << info.lock << " locked at " << info.file << ":" << info.line << " count: " << info.count;
+ ASSERT_NOT_REACHED();
+ }
+#endif
{
ScopedSpinLock lock(g_scheduler_lock);
@@ -1080,10 +1089,9 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
}
});
if (!timer) {
+ if (lock)
+ *lock = false;
// We timed out already, don't block
- // The API contract guarantees we return with interrupts enabled,
- // regardless of how we got called
- critical.set_interrupt_flag_on_destruction(true);
return BlockResult::InterruptedByTimeout;
}
}
@@ -1094,16 +1102,13 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
// The WaitQueue was already requested to wake someone when
// nobody was waiting. So return right away as we shouldn't
// be waiting
-
- // The API contract guarantees we return with interrupts enabled,
- // regardless of how we got called
- critical.set_interrupt_flag_on_destruction(true);
+ // NOTE: Do not set lock to false in this case!
return BlockResult::NotBlocked;
}
- did_unlock = unlock_process_if_locked();
if (lock)
*lock = false;
+ did_unlock = unlock_process_if_locked();
set_state(State::Queued);
m_wait_reason = reason;
@@ -1158,9 +1163,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
TimerQueue::the().cancel_timer(timer.release_nonnull());
}
- // The API contract guarantees we return with interrupts enabled,
- // regardless of how we got called
- sti();
return result;
}
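The Atomic<bool>* lock parameter implements a careful handoff with Lock::lock(): the caller still owns the m_lock flag when it enters wait_on(), and wait_on() now releases it (*lock = false) only after the thread has been placed on the queue, so a wake-up cannot slip in between the failed can_lock check and the sleep. Condensed:

// Handoff between Lock::lock() and wait_on(), as of this commit:
//
//   Lock::lock(): holds m_lock, can_lock failed, calls
//       wait_on(m_queue, m_name, nullptr, &m_lock, m_holder)
//   wait_on(): timed out early  -> *lock = false, InterruptedByTimeout
//              wake was pending -> *lock stays true, NotBlocked
//                                  (caller loops and retries while
//                                   still owning the m_lock flag)
//              otherwise        -> queue thread, *lock = false, block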
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index bb40634f18..6dea7b95ec 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -45,6 +45,8 @@
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>
+//#define LOCK_DEBUG
+
namespace Kernel {
enum class DispatchSignalResult {
@@ -961,6 +963,44 @@ public:
RecursiveSpinLock& get_lock() const { return m_lock; }
+#ifdef LOCK_DEBUG
+ void holding_lock(Lock& lock, bool holding, const char* file = nullptr, int line = 0)
+ {
+ m_holding_locks.fetch_add(holding ? 1 : -1, AK::MemoryOrder::memory_order_relaxed);
+ ScopedSpinLock list_lock(m_holding_locks_lock);
+ if (holding) {
+ bool have_existing = false;
+ for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
+ auto& info = m_holding_locks_list[i];
+ if (info.lock == &lock) {
+ have_existing = true;
+ info.count++;
+ break;
+ }
+ }
+ if (!have_existing)
+ m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
+ } else {
+ bool found = false;
+ for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
+ auto& info = m_holding_locks_list[i];
+ if (info.lock == &lock) {
+ ASSERT(info.count > 0);
+ if (--info.count == 0)
+ m_holding_locks_list.remove(i);
+ found = true;
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+ }
+ u32 lock_count() const
+ {
+ return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
+ }
+#endif
+
private:
IntrusiveListNode m_runnable_list_node;
IntrusiveListNode m_wait_queue_node;
@@ -1004,9 +1044,11 @@ private:
auto& blocker = static_cast<JoinBlocker&>(b);
// NOTE: m_lock is held already!
- if (m_thread_did_exit)
+ if (m_thread_did_exit) {
blocker.unblock(exit_value(), true);
- return m_thread_did_exit;
+ return false;
+ }
+ return true;
}
private:
@@ -1050,6 +1092,18 @@ private:
const char* m_wait_reason { nullptr };
WaitQueue* m_queue { nullptr };
+#ifdef LOCK_DEBUG
+ struct HoldingLockInfo {
+ Lock* lock;
+ const char* file;
+ int line;
+ unsigned count;
+ };
+ Atomic<u32> m_holding_locks { 0 };
+ SpinLock<u8> m_holding_locks_lock;
+ Vector<HoldingLockInfo> m_holding_locks_list;
+#endif
+
JoinBlockCondition m_join_condition;
Atomic<bool> m_is_active { false };
bool m_is_joinable { true };
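With LOCK_DEBUG enabled (uncomment the define near the top of this file), every acquire and release passes through holding_lock() above, and Thread::finalize() turns a leak into a hard failure. A hypothetical leak the check would catch (reusing s_example_lock from the Lock.h sketch above):

void leaky()
{
    // The Mode-only overload records the site as "unknown":0;
    // going through LOCKER() would record the real file:line.
    s_example_lock.lock(Lock::Mode::Exclusive);
    // ...early return without unlock()...
}
// When the thread is finalized: "Thread ... leaking 1 Locks!", one
// line per held Lock with name, address, file:line and count, then
// ASSERT_NOT_REACHED().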
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 9552264eb1..4182dee77c 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -474,10 +474,9 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
ASSERT_INTERRUPTS_DISABLED();
ASSERT(vmobject().is_inode());
- sti();
LOCKER(vmobject().m_paging_lock);
- cli();
+ ASSERT_INTERRUPTS_DISABLED();
auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
@@ -501,7 +500,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
#ifdef MM_DEBUG
dbg() << "MM: page_in_from_inode ready to read from inode";
#endif
- sti();
+
u8 page_buffer[PAGE_SIZE];
auto& inode = inode_vmobject.inode();
auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
@@ -514,7 +513,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
// If we read less than a page, zero out the rest to avoid leaking uninitialized data.
memset(page_buffer + nread, 0, PAGE_SIZE - nread);
}
- cli();
+
vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
if (vmobject_physical_page_entry.is_null()) {
klog() << "MM: handle_inode_fault was unable to allocate a physical page";
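These Region.cpp hunks are the motivating case for the whole change: handle_inode_fault() runs with interrupts disabled and previously had to wrap the paging lock and the inode read in sti()/cli() pairs, because Lock and wait_on would force interrupts on. Condensed, the pattern that is now valid in a page fault handler:

PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    LOCKER(vmobject().m_paging_lock); // may block and reschedule...
    ASSERT_INTERRUPTS_DISABLED();     // ...yet interrupts remain off on return
    // ...read the page from the inode and map it...
}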