author     Brian Gianforcaro <b.gianfo@gmail.com>  2020-12-26 01:47:08 -0800
committer  Andreas Kling <kling@serenityos.org>    2020-12-27 11:09:30 +0100
commit     815d39886f4f0e835bb6300682bafc7dc266ac2b
tree       6daf3e97a68b9ebf260a6ab339049600eb4cdc99
parent     21a5524d010d5ef0ab8b5cccd82df9484e484cb2
Kernel: Tag more methods and types as [[nodiscard]]
Tag methods where not observing the return value is an obvious error with [[nodiscard]] to catch potential future bugs.
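For context, [[nodiscard]] instructs the compiler to warn whenever a call's return value is silently discarded, which is how the tags below turn a forgotten check into a diagnostic. A minimal sketch of the idea (the SpinLockExample class is hypothetical and not taken from the kernel sources):

// Hypothetical example, not part of this commit: shows the warning that
// [[nodiscard]] produces when a caller drops a return value that only
// makes sense if it is checked.
class SpinLockExample {
public:
    [[nodiscard]] bool is_locked() const { return m_lock != 0; }
    void lock() { m_lock = 1; }
    void unlock() { m_lock = 0; }

private:
    int m_lock { 0 };
};

int main()
{
    SpinLockExample lock;
    lock.lock();

    lock.is_locked(); // warning: ignoring return value of function declared with 'nodiscard'

    if (lock.is_locked()) // fine: the result is actually observed
        lock.unlock();
}

With GCC or Clang the bare is_locked() call triggers an "ignoring return value" warning, while the guarded call compiles cleanly.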
-rw-r--r--  Kernel/Random.h    4
-rw-r--r--  Kernel/SpinLock.h  11
-rw-r--r--  Kernel/Thread.h    35
3 files changed, 26 insertions, 24 deletions
diff --git a/Kernel/Random.h b/Kernel/Random.h
index 5fe130d1c3..db4c29b4c3 100644
--- a/Kernel/Random.h
+++ b/Kernel/Random.h
@@ -87,12 +87,12 @@ public:
m_pools[pool].update(reinterpret_cast<const u8*>(&event_data), sizeof(T));
}
- bool is_seeded() const
+ [[nodiscard]] bool is_seeded() const
{
return m_reseed_number > 0;
}
- bool is_ready() const
+ [[nodiscard]] bool is_ready() const
{
return is_seeded() || m_p0_len >= reseed_threshold;
}
diff --git a/Kernel/SpinLock.h b/Kernel/SpinLock.h
index e2bf8e5daa..42da93492c 100644
--- a/Kernel/SpinLock.h
+++ b/Kernel/SpinLock.h
@@ -58,7 +58,7 @@ public:
Processor::current().leave_critical(prev_flags);
}
- ALWAYS_INLINE bool is_locked() const
+ [[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
@@ -105,12 +105,12 @@ public:
Processor::current().leave_critical(prev_flags);
}
- ALWAYS_INLINE bool is_locked() const
+ [[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
- ALWAYS_INLINE bool own_lock() const
+ [[nodiscard]] ALWAYS_INLINE bool own_lock() const
{
return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
}
@@ -126,7 +126,8 @@ private:
};
template<typename LockType>
-class ScopedSpinLock {
+class NO_DISCARD ScopedSpinLock {
+
AK_MAKE_NONCOPYABLE(ScopedSpinLock);
public:
@@ -175,7 +176,7 @@ public:
m_have_lock = false;
}
- ALWAYS_INLINE bool have_lock() const
+ [[nodiscard]] ALWAYS_INLINE bool have_lock() const
{
return m_have_lock;
}
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 6c44968f94..cd852f9958 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -113,7 +113,7 @@ public:
m_is_joinable = false;
}
- bool is_joinable() const
+ [[nodiscard]] bool is_joinable() const
{
ScopedSpinLock lock(m_lock);
return m_is_joinable;
@@ -181,7 +181,7 @@ public:
return m_type != type;
}
- bool was_interrupted() const
+ [[nodiscard]] bool was_interrupted() const
{
switch (m_type) {
case InterruptedBySignal:
@@ -192,7 +192,7 @@ public:
}
}
- bool timed_out() const
+ [[nodiscard]] bool timed_out() const
{
return m_type == InterruptedByTimeout;
}
@@ -330,7 +330,7 @@ public:
{
return m_was_interrupted_by_signal;
}
- bool was_interrupted() const
+ [[nodiscard]] bool was_interrupted() const
{
return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
}
@@ -734,10 +734,10 @@ public:
void resume_from_stopped();
- bool should_be_stopped() const;
- bool is_stopped() const { return m_state == Stopped; }
- bool is_blocked() const { return m_state == Blocked; }
- bool is_in_block() const
+ [[nodiscard]] bool should_be_stopped() const;
+ [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
+ [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
+ [[nodiscard]] bool is_in_block() const
{
ScopedSpinLock lock(m_block_lock);
return m_in_block;
@@ -932,7 +932,7 @@ public:
// Tell this thread to unblock if needed,
// gracefully unwind the stack and die.
void set_should_die();
- bool should_die() const { return m_should_die; }
+ [[nodiscard]] bool should_die() const { return m_should_die; }
void die_if_needed();
void exit(void* = nullptr);
@@ -946,7 +946,7 @@ public:
void set_state(State, u8 = 0);
- bool is_initialized() const { return m_initialized; }
+ [[nodiscard]] bool is_initialized() const { return m_initialized; }
void set_initialized(bool initialized) { m_initialized = initialized; }
void send_urgent_signal_to_self(u8 signal);
@@ -963,11 +963,11 @@ public:
DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
DispatchSignalResult dispatch_signal(u8 signal);
void check_dispatch_pending_signal();
- bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
+ [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
void terminate_due_to_signal(u8 signal);
- bool should_ignore_signal(u8 signal) const;
- bool has_signal_handler(u8 signal) const;
- bool has_pending_signal(u8 signal) const;
+ [[nodiscard]] bool should_ignore_signal(u8 signal) const;
+ [[nodiscard]] bool has_signal_handler(u8 signal) const;
+ [[nodiscard]] bool has_pending_signal(u8 signal) const;
u32 pending_signals() const;
u32 pending_signals_for_state() const;
@@ -1030,12 +1030,13 @@ public:
{
m_is_active.store(active, AK::memory_order_release);
}
- bool is_active() const
+
+ [[nodiscard]] bool is_active() const
{
return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
}
- bool is_finalizable() const
+ [[nodiscard]] bool is_finalizable() const
{
// We can't finalize as long as this thread is still running
// Note that checking for Running state here isn't sufficient
@@ -1060,7 +1061,7 @@ public:
template<typename Callback>
static IterationDecision for_each(Callback);
- static bool is_runnable_state(Thread::State state)
+ [[nodiscard]] static bool is_runnable_state(Thread::State state)
{
return state == Thread::State::Running || state == Thread::State::Runnable;
}