author     Tom <tomut@yahoo.com>                    2019-10-12 11:17:34 -0600
committer  Andreas Kling <awesomekling@gmail.com>   2019-10-12 19:30:59 +0200
commit     b0773a8ea649fd524af83504c2f9dff02ce46ee8 (patch)
tree       c8c843abb1dad372e346731e2843a53dbc26bbce /AK
parent     2530378f59ff8152a3da94c33569f2c246c9f761 (diff)
download   serenity-b0773a8ea649fd524af83504c2f9dff02ce46ee8.zip
AK: Add Atomic.h
Use gcc built-in atomics
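
For orientation, a minimal usage sketch of the wrapper this patch introduces, written against the API added below. The names (g_counter, g_ready, producer, try_claim) are illustrative only and not part of the commit; u32 is assumed to come from AK/Types.h as elsewhere in AK.

#include <AK/Atomic.h>
#include <AK/Types.h>

// Illustrative globals for the sketch.
static Atomic<u32> g_counter;
static Atomic<bool> g_ready;

void producer()
{
    // Relaxed increment: no ordering needed for a plain statistics counter.
    g_counter.fetch_add(1, AK::memory_order_relaxed);
    // Default ordering is memory_order_seq_cst.
    g_ready.store(true);
}

bool try_claim(u32& expected)
{
    // On failure, compare_exchange_strong writes the observed value back into 'expected'.
    return g_counter.compare_exchange_strong(expected, expected + 1);
}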
Diffstat (limited to 'AK')
-rw-r--r--  AK/Atomic.h              238
-rw-r--r--  AK/Tests/Makefile          5
-rw-r--r--  AK/Tests/TestAtomic.cpp  338
3 files changed, 580 insertions(+), 1 deletion(-)
diff --git a/AK/Atomic.h b/AK/Atomic.h
new file mode 100644
index 0000000000..0ab0b19601
--- /dev/null
+++ b/AK/Atomic.h
@@ -0,0 +1,238 @@
+#pragma once
+
+namespace AK {
+
+enum MemoryOrder
+{
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+};
+
+template <typename T>
+class Atomic
+{
+ T m_value { 0 };
+
+public:
+ Atomic() noexcept = default;
+ Atomic(const Atomic&) = delete;
+ Atomic& operator=(const Atomic&) volatile = delete;
+
+ Atomic(T val) noexcept:
+ m_value(val)
+ {
+ }
+
+ T exchange(T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_exchange_n(&m_value, desired, order);
+ }
+
+ bool compare_exchange_strong(T& expected, T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ if (order == memory_order_acq_rel || order == memory_order_release)
+ return __atomic_compare_exchange_n(&m_value, &expected, desired, false, memory_order_release, memory_order_acquire);
+ else
+ return __atomic_compare_exchange_n(&m_value, &expected, desired, false, order, order);
+ }
+
+ T operator++() volatile noexcept
+ {
+ return fetch_add(1) + 1;
+ }
+
+ T operator++(int) volatile noexcept
+ {
+ return fetch_add(1);
+ }
+
+ T operator+=(T val) volatile noexcept
+ {
+ return fetch_add(val) + val;
+ }
+
+ T fetch_add(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_add(&m_value, val, order);
+ }
+
+ T operator--() volatile noexcept
+ {
+ return fetch_sub(1) - 1;
+ }
+
+ T operator--(int) volatile noexcept
+ {
+ return fetch_sub(1);
+ }
+
+ T operator-=(T val) volatile noexcept
+ {
+ return fetch_sub(val) - val;
+ }
+
+ T fetch_sub(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_sub(&m_value, val, order);
+ }
+
+ T operator&=(T val) volatile noexcept
+ {
+ return fetch_and(val) & val;
+ }
+
+ T fetch_and(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_and(&m_value, val, order);
+ }
+
+ T operator|=(T val) volatile noexcept
+ {
+ return fetch_or(val) | val;
+ }
+
+ T fetch_or(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_or(&m_value, val, order);
+ }
+
+ T operator^=(T val) volatile noexcept
+ {
+ return fetch_xor(val) ^ val;
+ }
+
+ T fetch_xor(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_xor(&m_value, val, order);
+ }
+
+ operator T() const volatile noexcept
+ {
+ return load();
+ }
+
+ T load(MemoryOrder order = memory_order_seq_cst) const volatile noexcept
+ {
+ return __atomic_load_n(&m_value, order);
+ }
+
+ T operator=(T desired) volatile noexcept
+ {
+ store(desired);
+ return desired;
+ }
+
+ void store(T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ __atomic_store_n(&m_value, desired, order);
+ }
+
+ bool is_lock_free() const volatile noexcept
+ {
+ return __atomic_is_lock_free(sizeof(m_value), &m_value);
+ }
+};
+
+
+template <typename T>
+class Atomic<T*>
+{
+ T* m_value { nullptr };
+
+public:
+ Atomic() noexcept = default;
+ Atomic(const Atomic&) = delete;
+ Atomic& operator=(const Atomic&) volatile = delete;
+
+ Atomic(T* val) noexcept:
+ m_value(val)
+ {
+ }
+
+ T* exchange(T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_exchange_n(&m_value, desired, order);
+ }
+
+ bool compare_exchange_strong(T*& expected, T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ if (order == memory_order_acq_rel || order == memory_order_release)
+ return __atomic_compare_exchange_n(&m_value, &expected, desired, false, memory_order_release, memory_order_acquire);
+ else
+ return __atomic_compare_exchange_n(&m_value, &expected, desired, false, order, order);
+ }
+
+ T* operator++() volatile noexcept
+ {
+ return fetch_add(1) + 1;
+ }
+
+ T* operator++(int) volatile noexcept
+ {
+ return fetch_add(1);
+ }
+
+ T* operator+=(ptrdiff_t val) volatile noexcept
+ {
+ return fetch_add(val) + val;
+ }
+
+ T* fetch_add(ptrdiff_t val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_add(&m_value, val * sizeof(*m_value), order);
+ }
+
+ T* operator--() volatile noexcept
+ {
+ return fetch_sub(1) - 1;
+ }
+
+ T* operator--(int) volatile noexcept
+ {
+ return fetch_sub(1);
+ }
+
+ T* operator-=(ptrdiff_t val) volatile noexcept
+ {
+ return fetch_sub(val) - val;
+ }
+
+ T* fetch_sub(ptrdiff_t val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_fetch_sub(&m_value, val * sizeof(*m_value), order);
+ }
+
+ operator T*() const volatile noexcept
+ {
+ return load();
+ }
+
+ T* load(MemoryOrder order = memory_order_seq_cst) const volatile noexcept
+ {
+ return __atomic_load_n(&m_value, order);
+ }
+
+ T* operator=(T* desired) volatile noexcept
+ {
+ store(desired);
+ return desired;
+ }
+
+ void store(T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
+ {
+ __atomic_store_n(&m_value, desired, order);
+ }
+
+ bool is_lock_free() const volatile noexcept
+ {
+ return __atomic_is_lock_free(sizeof(m_value), &m_value);
+ }
+};
+
+}
+
+using AK::Atomic;
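
One detail of the Atomic<T*> specialization above worth calling out: fetch_add and fetch_sub multiply the element count by sizeof(*m_value) because the GCC __atomic_fetch_add/__atomic_fetch_sub built-ins treat the addend for a pointer operand as a raw byte offset rather than applying C++ pointer arithmetic. A small sketch of that behaviour, assuming AK/Types.h for u32; the array and function names are made up, and the expected values mirror the fetch_add test added further down:

#include <AK/Atomic.h>
#include <AK/Types.h>
#include <cassert>

// Hypothetical check that pointer steps are scaled to whole elements.
void pointer_step_example()
{
    u32 values[4] {};
    Atomic<u32*> p(&values[1]);

    // Internally this passes 2 * sizeof(u32) bytes to __atomic_fetch_add,
    // so the stored pointer advances by two elements, not two bytes.
    u32* old = p.fetch_add(2);
    assert(old == &values[1]);
    assert(p.load() == &values[3]);
}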
diff --git a/AK/Tests/Makefile b/AK/Tests/Makefile
index 02c293bf2b..50f5339a23 100644
--- a/AK/Tests/Makefile
+++ b/AK/Tests/Makefile
@@ -1,4 +1,4 @@
-PROGRAMS = TestString TestQueue TestVector TestHashMap TestJSON TestWeakPtr TestNonnullRefPtr TestRefPtr TestFixedArray TestFileSystemPath TestURL TestStringView TestUtf8
+PROGRAMS = TestAtomic TestString TestQueue TestVector TestHashMap TestJSON TestWeakPtr TestNonnullRefPtr TestRefPtr TestFixedArray TestFileSystemPath TestURL TestStringView TestUtf8
CXXFLAGS = -std=c++17 -Wall -Wextra -ggdb3 -O2 -I../ -I../../
@@ -25,6 +25,9 @@ endef
all: $(PROGRAMS)
$(foreach x,$(PROGRAMS),$(call execute-command,./$(x)))
+TestAtomic: TestAtomic.o $(SHARED_TEST_OBJS)
+ $(PRE_CXX) $(CXX) $(CXXFLAGS) -o $@ TestAtomic.o $(SHARED_TEST_OBJS)
+
TestString: TestString.o $(SHARED_TEST_OBJS)
$(PRE_CXX) $(CXX) $(CXXFLAGS) -o $@ TestString.o $(SHARED_TEST_OBJS)
diff --git a/AK/Tests/TestAtomic.cpp b/AK/Tests/TestAtomic.cpp
new file mode 100644
index 0000000000..89eef2cb66
--- /dev/null
+++ b/AK/Tests/TestAtomic.cpp
@@ -0,0 +1,338 @@
+#include <AK/TestSuite.h>
+
+#include <AK/Atomic.h>
+
+TEST_CASE(construct_empty)
+{
+ EXPECT(Atomic<bool>().load() == false);
+ EXPECT(Atomic<u32>().load() == 0);
+ EXPECT(Atomic<u16>().load() == 0);
+ EXPECT(Atomic<u8>().load() == 0);
+
+ EXPECT(Atomic<u16*>().load() == nullptr);
+}
+
+TEST_CASE(construct_with_value)
+{
+ EXPECT(Atomic<bool>(false).load() == false);
+ EXPECT(Atomic<bool>(true).load() == true);
+ EXPECT(Atomic<u32>(2).load() == 2);
+ EXPECT(Atomic<u16>(3).load() == 3);
+ EXPECT(Atomic<u8>(4).load() == 4);
+
+ u16 v_u16 = 0;
+ EXPECT(Atomic<u16*>(&v_u16).load() == &v_u16);
+}
+
+TEST_CASE(do_exchange)
+{
+ Atomic<bool> a_bool(false);
+ EXPECT(a_bool.exchange(true) == false);
+ EXPECT(a_bool.load() == true && static_cast<bool>(a_bool) == true);
+
+ Atomic<u32> a_u32(2);
+ EXPECT(a_u32.exchange(22) == 2);
+ EXPECT(a_u32.load() == 22 && static_cast<u8>(a_u32) == 22);
+
+ Atomic<u16> a_u16(3);
+ EXPECT(a_u16.exchange(33) == 3);
+ EXPECT(a_u16.load() == 33 && static_cast<u8>(a_u16) == 33);
+
+ Atomic<u8> a_u8(4);
+ EXPECT(a_u8.exchange(44) == 4);
+ EXPECT(a_u8.load() == 44 && static_cast<u8>(a_u8) == 44);
+
+ u16 v_u16[6];
+ Atomic<u16*> a_pu16(&v_u16[2]);
+ EXPECT(a_pu16.load() == &v_u16[2] && static_cast<u16*>(a_pu16) == &v_u16[2]);
+}
+
+TEST_CASE(do_compare_exchange)
+{
+ Atomic<bool> a_bool(false);
+ bool e_bool = true;
+ EXPECT(a_bool.compare_exchange_strong(e_bool, true) == false);
+ EXPECT(e_bool == false);
+ EXPECT(a_bool.load() == false && static_cast<bool>(a_bool) == false);
+ e_bool = false;
+ EXPECT(a_bool.compare_exchange_strong(e_bool, true) == true);
+ EXPECT(a_bool.load() == true && static_cast<bool>(a_bool) == true);
+
+ Atomic<u32> a_u32(2);
+ u32 e_u32 = 99;
+ EXPECT(a_u32.compare_exchange_strong(e_u32, 22) == false);
+ EXPECT(e_u32 == 2);
+ EXPECT(a_u32.load() == 2 && static_cast<u32>(a_u32) == 2);
+ e_u32 = 2;
+ EXPECT(a_u32.compare_exchange_strong(e_u32, 22) == true);
+ EXPECT(a_u32.load() == 22 && static_cast<u32>(a_u32) == 22);
+
+ Atomic<u16> a_u16(3);
+ u16 e_u16 = 99;
+ EXPECT(a_u16.compare_exchange_strong(e_u16, 33) == false);
+ EXPECT(e_u16 == 3);
+ EXPECT(a_u16.load() == 3 && static_cast<u16>(a_u16) == 3);
+ e_u16 = 3;
+ EXPECT(a_u16.compare_exchange_strong(e_u16, 33) == true);
+ EXPECT(a_u16.load() == 33 && static_cast<u16>(a_u16) == 33);
+
+ Atomic<u8> a_u8(4);
+ u8 e_u8 = 99;
+ EXPECT(a_u8.compare_exchange_strong(e_u8, 44) == false);
+ EXPECT(e_u8 == 4);
+ EXPECT(a_u8.load() == 4 && static_cast<u16>(a_u8) == 4);
+ e_u8 = 4;
+ EXPECT(a_u8.compare_exchange_strong(e_u8, 44) == true);
+ EXPECT(a_u8.load() == 44 && static_cast<u16>(a_u8) == 44);
+}
+
+TEST_CASE(fetch_add)
+{
+ Atomic<u32> a_u32(5);
+ EXPECT(a_u32.fetch_add(2) == 5);
+ EXPECT(a_u32.load() == 7 && static_cast<u32>(a_u32) == 7);
+
+ Atomic<u16> a_u16(5);
+ EXPECT(a_u16.fetch_add(2) == 5);
+ EXPECT(a_u16.load() == 7 && static_cast<u16>(a_u16) == 7);
+
+ Atomic<u8> a_u8(5);
+ EXPECT(a_u8.fetch_add(2) == 5);
+ EXPECT(a_u8.load() == 7 && static_cast<u8>(a_u8) == 7);
+
+ u32 v_u32[6];
+ Atomic<u32*> a_pu32(&v_u32[2]);
+ EXPECT(a_pu32.load() == &v_u32[2] && static_cast<u32*>(a_pu32) == &v_u32[2]);
+ EXPECT(a_pu32.fetch_add(2) == &v_u32[2]);
+ EXPECT(a_pu32.load() == &v_u32[4] && static_cast<u32*>(a_pu32) == &v_u32[4]);
+ EXPECT(a_pu32.fetch_add(-3) == &v_u32[4]);
+ EXPECT(a_pu32.load() == &v_u32[1] && static_cast<u32*>(a_pu32) == &v_u32[1]);
+
+ u16 v_u16[6];
+ Atomic<u16*> a_pu16(&v_u16[2]);
+ EXPECT(a_pu16.load() == &v_u16[2] && static_cast<u16*>(a_pu16) == &v_u16[2]);
+ EXPECT(a_pu16.fetch_add(2) == &v_u16[2]);
+ EXPECT(a_pu16.load() == &v_u16[4] && static_cast<u16*>(a_pu16) == &v_u16[4]);
+ EXPECT(a_pu16.fetch_add(-3) == &v_u16[4]);
+ EXPECT(a_pu16.load() == &v_u16[1] && static_cast<u16*>(a_pu16) == &v_u16[1]);
+
+ u8 v_u8[6];
+ Atomic<u8*> a_pu8(&v_u8[2]);
+ EXPECT(a_pu8.load() == &v_u8[2] && static_cast<u8*>(a_pu8) == &v_u8[2]);
+ EXPECT(a_pu8.fetch_add(2) == &v_u8[2]);
+ EXPECT(a_pu8.load() == &v_u8[4] && static_cast<u8*>(a_pu8) == &v_u8[4]);
+ EXPECT(a_pu8.fetch_add(-3) == &v_u8[4]);
+ EXPECT(a_pu8.load() == &v_u8[1] && static_cast<u8*>(a_pu8) == &v_u8[1]);
+}
+
+TEST_CASE(fetch_sub)
+{
+ Atomic<u32> a_u32(5);
+ EXPECT(a_u32.fetch_sub(2) == 5);
+ EXPECT(a_u32.load() == 3 && static_cast<u32>(a_u32) == 3);
+
+ Atomic<u16> a_u16(5);
+ EXPECT(a_u16.fetch_sub(2) == 5);
+ EXPECT(a_u16.load() == 3 && static_cast<u16>(a_u16) == 3);
+
+ Atomic<u8> a_u8(5);
+ EXPECT(a_u8.fetch_sub(2) == 5);
+ EXPECT(a_u8.load() == 3 && static_cast<u8>(a_u8) == 3);
+
+ u32 v_u32[6];
+ Atomic<u32*> a_pu32(&v_u32[2]);
+ EXPECT(a_pu32.load() == &v_u32[2] && static_cast<u32*>(a_pu32) == &v_u32[2]);
+ EXPECT(a_pu32.fetch_sub(2) == &v_u32[2]);
+ EXPECT(a_pu32.load() == &v_u32[0] && static_cast<u32*>(a_pu32) == &v_u32[0]);
+ EXPECT(a_pu32.fetch_sub(-3) == &v_u32[0]);
+ EXPECT(a_pu32.load() == &v_u32[3] && static_cast<u32*>(a_pu32) == &v_u32[3]);
+
+ u16 v_u16[6];
+ Atomic<u16*> a_pu16(&v_u16[2]);
+ EXPECT(a_pu16.load() == &v_u16[2] && static_cast<u16*>(a_pu16) == &v_u16[2]);
+ EXPECT(a_pu16.fetch_sub(2) == &v_u16[2]);
+ EXPECT(a_pu16.load() == &v_u16[0] && static_cast<u16*>(a_pu16) == &v_u16[0]);
+ EXPECT(a_pu16.fetch_sub(-3) == &v_u16[0]);
+ EXPECT(a_pu16.load() == &v_u16[3] && static_cast<u16*>(a_pu16) == &v_u16[3]);
+
+ u8 v_u8[6];
+ Atomic<u8*> a_pu8(&v_u8[2]);
+ EXPECT(a_pu8.load() == &v_u8[2] && static_cast<u8*>(a_pu8) == &v_u8[2]);
+ EXPECT(a_pu8.fetch_sub(2) == &v_u8[2]);
+ EXPECT(a_pu8.load() == &v_u8[0] && static_cast<u8*>(a_pu8) == &v_u8[0]);
+ EXPECT(a_pu8.fetch_sub(-3) == &v_u8[0]);
+ EXPECT(a_pu8.load() == &v_u8[3] && static_cast<u8*>(a_pu8) == &v_u8[3]);
+}
+
+TEST_CASE(fetch_inc)
+{
+ Atomic<u32> a_u32(5);
+ EXPECT(a_u32++ == 5);
+ EXPECT(a_u32.load() == 6 && a_u32 == 6);
+ EXPECT(++a_u32 == 7);
+ EXPECT(a_u32.load() == 7 && a_u32 == 7);
+ EXPECT((a_u32 += 2) == 9);
+ EXPECT(a_u32.load() == 9 && a_u32 == 9);
+
+ Atomic<u16> a_u16(5);
+ EXPECT(a_u16++ == 5);
+ EXPECT(a_u16.load() == 6 && a_u16 == 6);
+ EXPECT(++a_u16 == 7);
+ EXPECT(a_u16.load() == 7 && a_u16 == 7);
+ EXPECT((a_u16 += 2) == 9);
+ EXPECT(a_u16.load() == 9 && a_u16 == 9);
+
+ Atomic<u8> a_u8(5);
+ EXPECT(a_u8++ == 5);
+ EXPECT(a_u8.load() == 6 && a_u8 == 6);
+ EXPECT(++a_u8 == 7);
+ EXPECT(a_u8.load() == 7 && a_u8 == 7);
+ EXPECT((a_u8 += 2) == 9);
+ EXPECT(a_u8.load() == 9 && a_u8 == 9);
+
+ u32 v_u32[8];
+ Atomic<u32*> a_pu32(&v_u32[2]);
+ EXPECT(a_pu32++ == &v_u32[2]);
+ EXPECT(a_pu32.load() == &v_u32[3] && a_pu32 == &v_u32[3]);
+ EXPECT(++a_pu32 == &v_u32[4]);
+ EXPECT(a_pu32.load() == &v_u32[4] && a_pu32 == &v_u32[4]);
+ EXPECT((a_pu32 += 2) == &v_u32[6]);
+ EXPECT(a_pu32.load() == &v_u32[6] && a_pu32 == &v_u32[6]);
+
+ u16 v_u16[8];
+ Atomic<u16*> a_pu16(&v_u16[2]);
+ EXPECT(a_pu16++ == &v_u16[2]);
+ EXPECT(a_pu16.load() == &v_u16[3] && a_pu16 == &v_u16[3]);
+ EXPECT(++a_pu16 == &v_u16[4]);
+ EXPECT(a_pu16.load() == &v_u16[4] && a_pu16 == &v_u16[4]);
+ EXPECT((a_pu16 += 2) == &v_u16[6]);
+ EXPECT(a_pu16.load() == &v_u16[6] && a_pu16 == &v_u16[6]);
+
+ u8 v_u8[8];
+ Atomic<u8*> a_pu8(&v_u8[2]);
+ EXPECT(a_pu8++ == &v_u8[2]);
+ EXPECT(a_pu8.load() == &v_u8[3] && a_pu8 == &v_u8[3]);
+ EXPECT(++a_pu8 == &v_u8[4]);
+ EXPECT(a_pu8.load() == &v_u8[4] && a_pu8 == &v_u8[4]);
+ EXPECT((a_pu8 += 2) == &v_u8[6]);
+ EXPECT(a_pu8.load() == &v_u8[6] && a_pu8 == &v_u8[6]);
+}
+
+TEST_CASE(fetch_dec)
+{
+ Atomic<u32> a_u32(5);
+ EXPECT(a_u32-- == 5);
+ EXPECT(a_u32.load() == 4 && a_u32 == 4);
+ EXPECT(--a_u32 == 3);
+ EXPECT(a_u32.load() == 3 && a_u32 == 3);
+ EXPECT((a_u32 -= 2) == 1);
+ EXPECT(a_u32.load() == 1 && a_u32 == 1);
+
+ Atomic<u16> a_u16(5);
+ EXPECT(a_u16-- == 5);
+ EXPECT(a_u16.load() == 4 && a_u16 == 4);
+ EXPECT(--a_u16 == 3);
+ EXPECT(a_u16.load() == 3 && a_u16 == 3);
+ EXPECT((a_u16 -= 2) == 1);
+ EXPECT(a_u16.load() == 1 && a_u16 == 1);
+
+ Atomic<u8> a_u8(5);
+ EXPECT(a_u8-- == 5);
+ EXPECT(a_u8.load() == 4 && a_u8 == 4);
+ EXPECT(--a_u8 == 3);
+ EXPECT(a_u8.load() == 3 && a_u8 == 3);
+ EXPECT((a_u8 -= 2) == 1);
+ EXPECT(a_u8.load() == 1 && a_u8 == 1);
+
+ u32 v_u32[8];
+ Atomic<u32*> a_pu32(&v_u32[7]);
+ EXPECT(a_pu32-- == &v_u32[7]);
+ EXPECT(a_pu32.load() == &v_u32[6] && a_pu32 == &v_u32[6]);
+ EXPECT(--a_pu32 == &v_u32[5]);
+ EXPECT(a_pu32.load() == &v_u32[5] && a_pu32 == &v_u32[5]);
+ EXPECT((a_pu32 -= 2) == &v_u32[3]);
+ EXPECT(a_pu32.load() == &v_u32[3] && a_pu32 == &v_u32[3]);
+
+ u16 v_u16[8];
+ Atomic<u16*> a_pu16(&v_u16[7]);
+ EXPECT(a_pu16-- == &v_u16[7]);
+ EXPECT(a_pu16.load() == &v_u16[6] && a_pu16 == &v_u16[6]);
+ EXPECT(--a_pu16 == &v_u16[5]);
+ EXPECT(a_pu16.load() == &v_u16[5] && a_pu16 == &v_u16[5]);
+ EXPECT((a_pu16 -= 2) == &v_u16[3]);
+ EXPECT(a_pu16.load() == &v_u16[3] && a_pu16 == &v_u16[3]);
+
+ u8 v_u8[8];
+ Atomic<u8*> a_pu8(&v_u8[7]);
+ EXPECT(a_pu8-- == &v_u8[7]);
+ EXPECT(a_pu8.load() == &v_u8[6] && a_pu8 == &v_u8[6]);
+ EXPECT(--a_pu8 == &v_u8[5]);
+ EXPECT(a_pu8.load() == &v_u8[5] && a_pu8 == &v_u8[5]);
+ EXPECT((a_pu8 -= 2) == &v_u8[3]);
+ EXPECT(a_pu8.load() == &v_u8[3] && a_pu8 == &v_u8[3]);
+}
+
+TEST_CASE(fetch_and)
+{
+ Atomic<u32> a_u32(0xdeadbeef);
+ EXPECT(a_u32.fetch_and(0x8badf00d) == 0xdeadbeef);
+ EXPECT(a_u32.load() == 0x8aadb00d && static_cast<u32>(a_u32) == 0x8aadb00d);
+ a_u32 = 0xdeadbeef;
+ EXPECT((a_u32 &= 0x8badf00d) == 0x8aadb00d);
+
+ Atomic<u16> a_u16(0xbeef);
+ EXPECT(a_u16.fetch_and(0xf00d) == 0xbeef);
+ EXPECT(a_u16.load() == 0xb00d && static_cast<u16>(a_u16) == 0xb00d);
+ a_u16 = 0xbeef;
+ EXPECT((a_u16 &= 0xf00d) == 0xb00d);
+
+ Atomic<u8> a_u8(0xef);
+ EXPECT(a_u8.fetch_and(0x0d) == 0xef);
+ EXPECT(a_u8.load() == 0x0d && static_cast<u8>(a_u8) == 0x0d);
+ a_u8 = 0xef;
+ EXPECT((a_u8 &= 0x0d) == 0x0d);
+}
+
+TEST_CASE(fetch_or)
+{
+ Atomic<u32> a_u32(0xaadb00d);
+ EXPECT(a_u32.fetch_or(0xdeadbeef) == 0xaadb00d);
+ EXPECT(a_u32.load() == 0xdeadbeef && static_cast<u32>(a_u32) == 0xdeadbeef);
+ a_u32 = 0xaadb00d;
+ EXPECT((a_u32 |= 0xdeadbeef) == 0xdeadbeef);
+
+ Atomic<u16> a_u16(0xb00d);
+ EXPECT(a_u16.fetch_or(0xbeef) == 0xb00d);
+ EXPECT(a_u16.load() == 0xbeef && static_cast<u16>(a_u16) == 0xbeef);
+ a_u16 = 0xb00d;
+ EXPECT((a_u16 |= 0xbeef) == 0xbeef);
+
+ Atomic<u8> a_u8(0x0d);
+ EXPECT(a_u8.fetch_or(0xef) == 0x0d);
+ EXPECT(a_u8.load() == 0xef && static_cast<u8>(a_u8) == 0xef);
+ a_u8 = 0x0d;
+ EXPECT((a_u8 |= 0xef) == 0xef);
+}
+
+TEST_CASE(fetch_xor)
+{
+ Atomic<u32> a_u32(0x55004ee2);
+ EXPECT(a_u32.fetch_xor(0xdeadbeef) == 0x55004ee2);
+ EXPECT(a_u32.load() == 0x8badf00d && static_cast<u32>(a_u32) == 0x8badf00d);
+ a_u32 = 0x55004ee2;
+ EXPECT((a_u32 ^= 0xdeadbeef) == 0x8badf00d);
+
+ Atomic<u16> a_u16(0x4ee2);
+ EXPECT(a_u16.fetch_xor(0xbeef) == 0x4ee2);
+ EXPECT(a_u16.load() == 0xf00d && static_cast<u16>(a_u16) == 0xf00d);
+ a_u16 = 0x4ee2;
+ EXPECT((a_u16 ^= 0xbeef) == 0xf00d);
+
+ Atomic<u8> a_u8(0xe2);
+ EXPECT(a_u8.fetch_xor(0xef) == 0xe2);
+ EXPECT(a_u8.load() == 0x0d && static_cast<u8>(a_u8) == 0x0d);
+ a_u8 = 0xe2;
+ EXPECT((a_u8 ^= 0xef) == 0x0d);
+}
+
+TEST_MAIN(Atomic)