summaryrefslogtreecommitdiff
path: root/Kernel
diff options
context:
space:
mode:
authorAndrew Kaster <akaster@serenityos.org>2021-08-22 21:53:26 -0600
committerAndreas Kling <kling@serenityos.org>2021-08-28 20:53:38 +0200
commitdea62fe93c09900b1453519d6adc768eb177b18f (patch)
tree20164a3bc7c8858f974a95c35597f1d05cec4e9b /Kernel
parent70518e69f40869b2fb6cbf69e33cbd24ecfa6d61 (diff)
downloadserenity-dea62fe93c09900b1453519d6adc768eb177b18f.zip
Kernel: Guard the all processes list with a Spinlock rather than a Mutex
There are callers of processes().with or processes().for_each that require interrupts to be disabled. Taking a Mutex with interrupts disabled is a recipe for deadlock, so convert this to a Spinlock.
Diffstat (limited to 'Kernel')
-rw-r--r--Kernel/GlobalProcessExposed.cpp2
-rw-r--r--Kernel/Process.cpp14
-rw-r--r--Kernel/Process.h8
-rw-r--r--Kernel/Syscalls/kill.cpp2
4 files changed, 13 insertions, 13 deletions
diff --git a/Kernel/GlobalProcessExposed.cpp b/Kernel/GlobalProcessExposed.cpp
index f73a672a64..6388fc4825 100644
--- a/Kernel/GlobalProcessExposed.cpp
+++ b/Kernel/GlobalProcessExposed.cpp
@@ -916,7 +916,7 @@ KResult ProcFSRootDirectory::traverse_as_directory(unsigned fsid, Function<bool(
InodeIdentifier identifier = { fsid, component.component_index() };
callback({ component.name(), identifier, 0 });
}
- processes().for_each_shared([&](Process& process) {
+ processes().for_each([&](Process& process) {
VERIFY(!(process.pid() < 0));
u64 process_id = (u64)process.pid().value();
InodeIdentifier identifier = { fsid, static_cast<InodeIndex>(process_id << 36) };
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 6299afa079..d434b37738 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -44,7 +44,7 @@ static void create_signal_trampoline();
RecursiveSpinlock g_profiling_lock;
static Atomic<pid_t> next_pid;
-static Singleton<MutexProtected<Process::List>> s_processes;
+static Singleton<SpinlockProtected<Process::List>> s_processes;
READONLY_AFTER_INIT HashMap<String, OwnPtr<Module>>* g_modules;
READONLY_AFTER_INIT Memory::Region* g_signal_trampoline_region;
@@ -55,7 +55,7 @@ MutexProtected<String>& hostname()
return *s_hostname;
}
-MutexProtected<Process::List>& processes()
+SpinlockProtected<Process::List>& processes()
{
return *s_processes;
}
@@ -86,7 +86,7 @@ UNMAP_AFTER_INIT void Process::initialize()
NonnullRefPtrVector<Process> Process::all_processes()
{
NonnullRefPtrVector<Process> output;
- processes().with_shared([&](const auto& list) {
+ processes().with([&](const auto& list) {
output.ensure_capacity(list.size_slow());
for (const auto& process : list)
output.append(NonnullRefPtr<Process>(process));
@@ -138,7 +138,7 @@ void Process::register_new(Process& process)
{
// Note: this is essentially the same like process->ref()
RefPtr<Process> new_process = process;
- processes().with_exclusive([&](auto& list) {
+ processes().with([&](auto& list) {
list.prepend(process);
});
}
@@ -301,7 +301,7 @@ bool Process::unref() const
// NOTE: We need to obtain the process list lock before doing anything,
// because otherwise someone might get in between us lowering the
// refcount and acquiring the lock.
- auto did_hit_zero = processes().with_exclusive([&](auto& list) {
+ auto did_hit_zero = processes().with([&](auto& list) {
auto new_ref_count = deref_base();
if (new_ref_count > 0)
return false;
@@ -418,7 +418,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)
RefPtr<Process> Process::from_pid(ProcessID pid)
{
- return processes().with_shared([&](const auto& list) -> RefPtr<Process> {
+ return processes().with([&](const auto& list) -> RefPtr<Process> {
for (auto& process : list) {
if (process.pid() == pid)
return &process;
@@ -696,7 +696,7 @@ void Process::die()
m_threads_for_coredump.append(thread);
});
- processes().with_shared([&](const auto& list) {
+ processes().with([&](const auto& list) {
for (auto it = list.begin(); it != list.end();) {
auto& process = *it;
++it;
diff --git a/Kernel/Process.h b/Kernel/Process.h
index c891ee9ff5..71d33f372e 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -815,13 +815,13 @@ static_assert(sizeof(Process) == (PAGE_SIZE * 2));
extern RecursiveSpinlock g_profiling_lock;
-MutexProtected<Process::List>& processes();
+SpinlockProtected<Process::List>& processes();
template<IteratorFunction<Process&> Callback>
inline void Process::for_each(Callback callback)
{
VERIFY_INTERRUPTS_DISABLED();
- processes().with_shared([&](const auto& list) {
+ processes().with([&](const auto& list) {
for (auto it = list.begin(); it != list.end();) {
auto& process = *it;
++it;
@@ -835,7 +835,7 @@ template<IteratorFunction<Process&> Callback>
inline void Process::for_each_child(Callback callback)
{
ProcessID my_pid = pid();
- processes().with_shared([&](const auto& list) {
+ processes().with([&](const auto& list) {
for (auto it = list.begin(); it != list.end();) {
auto& process = *it;
++it;
@@ -876,7 +876,7 @@ inline IterationDecision Process::for_each_thread(Callback callback)
template<IteratorFunction<Process&> Callback>
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
{
- processes().with_shared([&](const auto& list) {
+ processes().with([&](const auto& list) {
for (auto it = list.begin(); it != list.end();) {
auto& process = *it;
++it;
diff --git a/Kernel/Syscalls/kill.cpp b/Kernel/Syscalls/kill.cpp
index 4033348e94..24339516db 100644
--- a/Kernel/Syscalls/kill.cpp
+++ b/Kernel/Syscalls/kill.cpp
@@ -65,7 +65,7 @@ KResult Process::do_killall(int signal)
KResult error = KSuccess;
// Send the signal to all processes we have access to for.
- processes().for_each_shared([&](auto& process) {
+ processes().for_each([&](auto& process) {
KResult res = KSuccess;
if (process.pid() == pid())
res = do_killself(signal);