diff --git a/include/atomic b/include/atomic
new file mode 100644
index 0000000..a455286
--- /dev/null
+++ b/include/atomic
@@ -0,0 +1,1552 @@
+// -*- C++ -*- header.
+
+// Copyright (C) 2008-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file include/atomic
+ * This is a Standard C++ Library header.
+ */
+
+// Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html
+
+#ifndef _GLIBCXX_ATOMIC
+#define _GLIBCXX_ATOMIC 1
+
+#pragma GCC system_header
+
+#if __cplusplus < 201103L
+# include <bits/c++0x_warning.h>
+#else
+
+#include <bits/atomic_base.h>
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /**
+ * @addtogroup atomics
+ * @{
+ */
+
+#if __cplusplus >= 201703L
+# define __cpp_lib_atomic_is_always_lock_free 201603
+#endif
+
+ template<typename _Tp>
+ struct atomic;
+
+ /// atomic<bool>
+ // NB: No operators or fetch-operations for this type.
+ template<>
+ struct atomic<bool>
+ {
+ using value_type = bool;
+
+ private:
+ __atomic_base<bool> _M_base;
+
+ public:
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(bool __i) noexcept : _M_base(__i) { }
+
+ bool
+ operator=(bool __i) noexcept
+ { return _M_base.operator=(__i); }
+
+ bool
+ operator=(bool __i) volatile noexcept
+ { return _M_base.operator=(__i); }
+
+ operator bool() const noexcept
+ { return _M_base.load(); }
+
+ operator bool() const volatile noexcept
+ { return _M_base.load(); }
+
+ bool
+ is_lock_free() const noexcept { return _M_base.is_lock_free(); }
+
+ bool
+ is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); }
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_BOOL_LOCK_FREE == 2;
+#endif
+
+ void
+ store(bool __i, memory_order __m = memory_order_seq_cst) noexcept
+ { _M_base.store(__i, __m); }
+
+ void
+ store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept
+ { _M_base.store(__i, __m); }
+
+ bool
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return _M_base.load(__m); }
+
+ bool
+ load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+ { return _M_base.load(__m); }
+
+ bool
+ exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept
+ { return _M_base.exchange(__i, __m); }
+
+ bool
+ exchange(bool __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return _M_base.exchange(__i, __m); }
+
+ bool
+ compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
+ memory_order __m2) noexcept
+ { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }
+
+ bool
+ compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
+ memory_order __m2) volatile noexcept
+ { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }
+
+ bool
+ compare_exchange_weak(bool& __i1, bool __i2,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return _M_base.compare_exchange_weak(__i1, __i2, __m); }
+
+ bool
+ compare_exchange_weak(bool& __i1, bool __i2,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return _M_base.compare_exchange_weak(__i1, __i2, __m); }
+
+ bool
+ compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
+ memory_order __m2) noexcept
+ { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }
+
+ bool
+ compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
+ memory_order __m2) volatile noexcept
+ { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }
+
+ bool
+ compare_exchange_strong(bool& __i1, bool __i2,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
+
+ bool
+ compare_exchange_strong(bool& __i1, bool __i2,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
+ };
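The specialization above forwards every operation to its __atomic_base<bool> member and deliberately provides no fetch-operations or arithmetic operators. A minimal usage sketch, not part of the patch; the `ready` flag and the two thread functions are hypothetical:

    #include <atomic>
    #include <thread>

    std::atomic<bool> ready(false);           // uses the atomic<bool> specialization

    void producer() { ready.store(true, std::memory_order_release); }

    void consumer()
    {
      while (!ready.load(std::memory_order_acquire))
        ;                                     // spin until the store becomes visible
    }

    int main()
    {
      std::thread t1(producer), t2(consumer);
      t1.join();
      t2.join();
    }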
+
+#if __cplusplus <= 201703L
+# define _GLIBCXX20_INIT(I)
+#else
+# define _GLIBCXX20_INIT(I) = I
+#endif
+
+ /**
+ * @brief Generic atomic type, primary class template.
+ *
+ * @tparam _Tp Type to be made atomic, must be trivially copyable.
+ */
+ template<typename _Tp>
+ struct atomic
+ {
+ using value_type = _Tp;
+
+ private:
+ // Align 1/2/4/8/16-byte types to at least their size.
+ static constexpr int _S_min_alignment
+ = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
+ ? 0 : sizeof(_Tp);
+
+ static constexpr int _S_alignment
+ = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
+
+ alignas(_S_alignment) _Tp _M_i _GLIBCXX20_INIT(_Tp());
+
+ static_assert(__is_trivially_copyable(_Tp),
+ "std::atomic requires a trivially copyable type");
+
+ static_assert(sizeof(_Tp) > 0,
+ "Incomplete or zero-sized types are not supported");
+
+ public:
+ atomic() = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(_Tp __i) noexcept : _M_i(__i) { }
+
+ operator _Tp() const noexcept
+ { return load(); }
+
+ operator _Tp() const volatile noexcept
+ { return load(); }
+
+ _Tp
+ operator=(_Tp __i) noexcept
+ { store(__i); return __i; }
+
+ _Tp
+ operator=(_Tp __i) volatile noexcept
+ { store(__i); return __i; }
+
+ bool
+ is_lock_free() const noexcept
+ {
+ // Produce a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(sizeof(_M_i),
+ reinterpret_cast<void *>(-_S_alignment));
+ }
+
+ bool
+ is_lock_free() const volatile noexcept
+ {
+ // Produce a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(sizeof(_M_i),
+ reinterpret_cast<void *>(-_S_alignment));
+ }
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free
+ = __atomic_always_lock_free(sizeof(_M_i), 0);
+#endif
+
+ void
+ store(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept
+ { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), int(__m)); }
+
+ void
+ store(_Tp __i, memory_order __m = memory_order_seq_cst) volatile noexcept
+ { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), int(__m)); }
+
+ _Tp
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ {
+ alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+ _Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
+ __atomic_load(std::__addressof(_M_i), __ptr, int(__m));
+ return *__ptr;
+ }
+
+ _Tp
+ load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+ {
+ alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+ _Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
+ __atomic_load(std::__addressof(_M_i), __ptr, int(__m));
+ return *__ptr;
+ }
+
+ _Tp
+ exchange(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept
+ {
+ alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+ _Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
+ __atomic_exchange(std::__addressof(_M_i), std::__addressof(__i),
+ __ptr, int(__m));
+ return *__ptr;
+ }
+
+ _Tp
+ exchange(_Tp __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+ _Tp* __ptr = reinterpret_cast<_Tp*>(__buf);
+ __atomic_exchange(std::__addressof(_M_i), std::__addressof(__i),
+ __ptr, int(__m));
+ return *__ptr;
+ }
+
+ bool
+ compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
+ memory_order __f) noexcept
+ {
+ return __atomic_compare_exchange(std::__addressof(_M_i),
+ std::__addressof(__e),
+ std::__addressof(__i),
+ true, int(__s), int(__f));
+ }
+
+ bool
+ compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
+ memory_order __f) volatile noexcept
+ {
+ return __atomic_compare_exchange(std::__addressof(_M_i),
+ std::__addressof(__e),
+ std::__addressof(__i),
+ true, int(__s), int(__f));
+ }
+
+ bool
+ compare_exchange_weak(_Tp& __e, _Tp __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return compare_exchange_weak(__e, __i, __m,
+ __cmpexch_failure_order(__m)); }
+
+ bool
+ compare_exchange_weak(_Tp& __e, _Tp __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return compare_exchange_weak(__e, __i, __m,
+ __cmpexch_failure_order(__m)); }
+
+ bool
+ compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
+ memory_order __f) noexcept
+ {
+ return __atomic_compare_exchange(std::__addressof(_M_i),
+ std::__addressof(__e),
+ std::__addressof(__i),
+ false, int(__s), int(__f));
+ }
+
+ bool
+ compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
+ memory_order __f) volatile noexcept
+ {
+ return __atomic_compare_exchange(std::__addressof(_M_i),
+ std::__addressof(__e),
+ std::__addressof(__i),
+ false, int(__s), int(__f));
+ }
+
+ bool
+ compare_exchange_strong(_Tp& __e, _Tp __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return compare_exchange_strong(__e, __i, __m,
+ __cmpexch_failure_order(__m)); }
+
+ bool
+ compare_exchange_strong(_Tp& __e, _Tp __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return compare_exchange_strong(__e, __i, __m,
+ __cmpexch_failure_order(__m)); }
+ };
+#undef _GLIBCXX20_INIT
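The primary template stores the _Tp object with boosted alignment (_S_alignment) and implements its operations with the generic __atomic_store/__atomic_load/__atomic_compare_exchange built-ins, which take an address and size and so work for any trivially copyable type. A sketch, not part of the patch, using a hypothetical two-int POD:

    #include <atomic>

    struct Pair { int lo; int hi; };          // trivially copyable

    std::atomic<Pair> p(Pair{0, 0});

    void bump_both()
    {
      Pair expected = p.load(std::memory_order_relaxed);
      Pair desired;
      do
        desired = Pair{expected.lo + 1, expected.hi + 1};
      while (!p.compare_exchange_weak(expected, desired,
                                      std::memory_order_acq_rel,
                                      std::memory_order_relaxed));
    }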
+
+ /// Partial specialization for pointer types.
+ template<typename _Tp>
+ struct atomic<_Tp*>
+ {
+ using value_type = _Tp*;
+ using difference_type = ptrdiff_t;
+
+ typedef _Tp* __pointer_type;
+ typedef __atomic_base<_Tp*> __base_type;
+ __base_type _M_b;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__pointer_type __p) noexcept : _M_b(__p) { }
+
+ operator __pointer_type() const noexcept
+ { return __pointer_type(_M_b); }
+
+ operator __pointer_type() const volatile noexcept
+ { return __pointer_type(_M_b); }
+
+ __pointer_type
+ operator=(__pointer_type __p) noexcept
+ { return _M_b.operator=(__p); }
+
+ __pointer_type
+ operator=(__pointer_type __p) volatile noexcept
+ { return _M_b.operator=(__p); }
+
+ __pointer_type
+ operator++(int) noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b++;
+ }
+
+ __pointer_type
+ operator++(int) volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b++;
+ }
+
+ __pointer_type
+ operator--(int) noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b--;
+ }
+
+ __pointer_type
+ operator--(int) volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b--;
+ }
+
+ __pointer_type
+ operator++() noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return ++_M_b;
+ }
+
+ __pointer_type
+ operator++() volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return ++_M_b;
+ }
+
+ __pointer_type
+ operator--() noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return --_M_b;
+ }
+
+ __pointer_type
+ operator--() volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return --_M_b;
+ }
+
+ __pointer_type
+ operator+=(ptrdiff_t __d) noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.operator+=(__d);
+ }
+
+ __pointer_type
+ operator+=(ptrdiff_t __d) volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.operator+=(__d);
+ }
+
+ __pointer_type
+ operator-=(ptrdiff_t __d) noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.operator-=(__d);
+ }
+
+ __pointer_type
+ operator-=(ptrdiff_t __d) volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.operator-=(__d);
+ }
+
+ bool
+ is_lock_free() const noexcept
+ { return _M_b.is_lock_free(); }
+
+ bool
+ is_lock_free() const volatile noexcept
+ { return _M_b.is_lock_free(); }
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
+#endif
+
+ void
+ store(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return _M_b.store(__p, __m); }
+
+ void
+ store(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return _M_b.store(__p, __m); }
+
+ __pointer_type
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return _M_b.load(__m); }
+
+ __pointer_type
+ load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+ { return _M_b.load(__m); }
+
+ __pointer_type
+ exchange(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return _M_b.exchange(__p, __m); }
+
+ __pointer_type
+ exchange(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return _M_b.exchange(__p, __m); }
+
+ bool
+ compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m1, memory_order __m2) noexcept
+ { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+
+ bool
+ compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m1,
+ memory_order __m2) volatile noexcept
+ { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+
+ bool
+ compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return compare_exchange_weak(__p1, __p2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ bool
+ compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return compare_exchange_weak(__p1, __p2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ bool
+ compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m1, memory_order __m2) noexcept
+ { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+
+ bool
+ compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m1,
+ memory_order __m2) volatile noexcept
+ { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
+
+ bool
+ compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return _M_b.compare_exchange_strong(__p1, __p2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ bool
+ compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return _M_b.compare_exchange_strong(__p1, __p2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ __pointer_type
+ fetch_add(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.fetch_add(__d, __m);
+ }
+
+ __pointer_type
+ fetch_add(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.fetch_add(__d, __m);
+ }
+
+ __pointer_type
+ fetch_sub(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.fetch_sub(__d, __m);
+ }
+
+ __pointer_type
+ fetch_sub(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+#if __cplusplus >= 201703L
+ static_assert( is_object<_Tp>::value, "pointer to object type" );
+#endif
+ return _M_b.fetch_sub(__d, __m);
+ }
+ };
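The partial specialization defers to __atomic_base<_Tp*>, which scales fetch_add/fetch_sub by sizeof(_Tp); the C++17 static_asserts reject arithmetic on pointers to non-object types. A sketch, not part of the patch; `buffer`, `cursor` and `claim_slot` are hypothetical names:

    #include <atomic>

    int buffer[64];
    std::atomic<int*> cursor(buffer);

    int* claim_slot()
    {
      // Advances by one int (sizeof(int) bytes), returning the previous
      // position, so concurrent callers receive distinct slots.
      return cursor.fetch_add(1, std::memory_order_relaxed);
    }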
+
+
+ /// Explicit specialization for char.
+ template<>
+ struct atomic<char> : __atomic_base<char>
+ {
+ typedef char __integral_type;
+ typedef __atomic_base<char> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for signed char.
+ template<>
+ struct atomic<signed char> : __atomic_base<signed char>
+ {
+ typedef signed char __integral_type;
+ typedef __atomic_base<signed char> __base_type;
+
+ atomic() noexcept= default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for unsigned char.
+ template<>
+ struct atomic<unsigned char> : __atomic_base<unsigned char>
+ {
+ typedef unsigned char __integral_type;
+ typedef __atomic_base<unsigned char> __base_type;
+
+ atomic() noexcept= default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for short.
+ template<>
+ struct atomic<short> : __atomic_base<short>
+ {
+ typedef short __integral_type;
+ typedef __atomic_base<short> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for unsigned short.
+ template<>
+ struct atomic<unsigned short> : __atomic_base<unsigned short>
+ {
+ typedef unsigned short __integral_type;
+ typedef __atomic_base<unsigned short> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for int.
+ template<>
+ struct atomic<int> : __atomic_base<int>
+ {
+ typedef int __integral_type;
+ typedef __atomic_base<int> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for unsigned int.
+ template<>
+ struct atomic<unsigned int> : __atomic_base<unsigned int>
+ {
+ typedef unsigned int __integral_type;
+ typedef __atomic_base<unsigned int> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for long.
+ template<>
+ struct atomic<long> : __atomic_base<long>
+ {
+ typedef long __integral_type;
+ typedef __atomic_base<long> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for unsigned long.
+ template<>
+ struct atomic<unsigned long> : __atomic_base<unsigned long>
+ {
+ typedef unsigned long __integral_type;
+ typedef __atomic_base<unsigned long> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for long long.
+ template<>
+ struct atomic<long long> : __atomic_base<long long>
+ {
+ typedef long long __integral_type;
+ typedef __atomic_base<long long> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for unsigned long long.
+ template<>
+ struct atomic<unsigned long long> : __atomic_base<unsigned long long>
+ {
+ typedef unsigned long long __integral_type;
+ typedef __atomic_base<unsigned long long> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for wchar_t.
+ template<>
+ struct atomic<wchar_t> : __atomic_base<wchar_t>
+ {
+ typedef wchar_t __integral_type;
+ typedef __atomic_base<wchar_t> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_WCHAR_T_LOCK_FREE == 2;
+#endif
+ };
+
+#ifdef _GLIBCXX_USE_CHAR8_T
+ /// Explicit specialization for char8_t.
+ template<>
+ struct atomic<char8_t> : __atomic_base<char8_t>
+ {
+ typedef char8_t __integral_type;
+ typedef __atomic_base<char8_t> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus > 201402L
+ static constexpr bool is_always_lock_free = ATOMIC_CHAR8_T_LOCK_FREE == 2;
+#endif
+ };
+#endif
+
+ /// Explicit specialization for char16_t.
+ template<>
+ struct atomic<char16_t> : __atomic_base<char16_t>
+ {
+ typedef char16_t __integral_type;
+ typedef __atomic_base<char16_t> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_CHAR16_T_LOCK_FREE == 2;
+#endif
+ };
+
+ /// Explicit specialization for char32_t.
+ template<>
+ struct atomic<char32_t> : __atomic_base<char32_t>
+ {
+ typedef char32_t __integral_type;
+ typedef __atomic_base<char32_t> __base_type;
+
+ atomic() noexcept = default;
+ ~atomic() noexcept = default;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
+
+ using __base_type::operator __integral_type;
+ using __base_type::operator=;
+
+#if __cplusplus >= 201703L
+ static constexpr bool is_always_lock_free = ATOMIC_CHAR32_T_LOCK_FREE == 2;
+#endif
+ };
+
+
+ /// atomic_bool
+ typedef atomic<bool> atomic_bool;
+
+ /// atomic_char
+ typedef atomic<char> atomic_char;
+
+ /// atomic_schar
+ typedef atomic<signed char> atomic_schar;
+
+ /// atomic_uchar
+ typedef atomic<unsigned char> atomic_uchar;
+
+ /// atomic_short
+ typedef atomic<short> atomic_short;
+
+ /// atomic_ushort
+ typedef atomic<unsigned short> atomic_ushort;
+
+ /// atomic_int
+ typedef atomic<int> atomic_int;
+
+ /// atomic_uint
+ typedef atomic<unsigned int> atomic_uint;
+
+ /// atomic_long
+ typedef atomic<long> atomic_long;
+
+ /// atomic_ulong
+ typedef atomic<unsigned long> atomic_ulong;
+
+ /// atomic_llong
+ typedef atomic<long long> atomic_llong;
+
+ /// atomic_ullong
+ typedef atomic<unsigned long long> atomic_ullong;
+
+ /// atomic_wchar_t
+ typedef atomic<wchar_t> atomic_wchar_t;
+
+#ifdef _GLIBCXX_USE_CHAR8_T
+ /// atomic_char8_t
+ typedef atomic<char8_t> atomic_char8_t;
+#endif
+
+ /// atomic_char16_t
+ typedef atomic<char16_t> atomic_char16_t;
+
+ /// atomic_char32_t
+ typedef atomic<char32_t> atomic_char32_t;
+
+#ifdef _GLIBCXX_USE_C99_STDINT_TR1
+ // _GLIBCXX_RESOLVE_LIB_DEFECTS
+ // 2441. Exact-width atomic typedefs should be provided
+
+ /// atomic_int8_t
+ typedef atomic<int8_t> atomic_int8_t;
+
+ /// atomic_uint8_t
+ typedef atomic<uint8_t> atomic_uint8_t;
+
+ /// atomic_int16_t
+ typedef atomic<int16_t> atomic_int16_t;
+
+ /// atomic_uint16_t
+ typedef atomic<uint16_t> atomic_uint16_t;
+
+ /// atomic_int32_t
+ typedef atomic<int32_t> atomic_int32_t;
+
+ /// atomic_uint32_t
+ typedef atomic<uint32_t> atomic_uint32_t;
+
+ /// atomic_int64_t
+ typedef atomic<int64_t> atomic_int64_t;
+
+ /// atomic_uint64_t
+ typedef atomic<uint64_t> atomic_uint64_t;
+
+
+ /// atomic_int_least8_t
+ typedef atomic<int_least8_t> atomic_int_least8_t;
+
+ /// atomic_uint_least8_t
+ typedef atomic<uint_least8_t> atomic_uint_least8_t;
+
+ /// atomic_int_least16_t
+ typedef atomic<int_least16_t> atomic_int_least16_t;
+
+ /// atomic_uint_least16_t
+ typedef atomic<uint_least16_t> atomic_uint_least16_t;
+
+ /// atomic_int_least32_t
+ typedef atomic<int_least32_t> atomic_int_least32_t;
+
+ /// atomic_uint_least32_t
+ typedef atomic<uint_least32_t> atomic_uint_least32_t;
+
+ /// atomic_int_least64_t
+ typedef atomic<int_least64_t> atomic_int_least64_t;
+
+ /// atomic_uint_least64_t
+ typedef atomic<uint_least64_t> atomic_uint_least64_t;
+
+
+ /// atomic_int_fast8_t
+ typedef atomic<int_fast8_t> atomic_int_fast8_t;
+
+ /// atomic_uint_fast8_t
+ typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
+
+ /// atomic_int_fast16_t
+ typedef atomic<int_fast16_t> atomic_int_fast16_t;
+
+ /// atomic_uint_fast16_t
+ typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
+
+ /// atomic_int_fast32_t
+ typedef atomic<int_fast32_t> atomic_int_fast32_t;
+
+ /// atomic_uint_fast32_t
+ typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
+
+ /// atomic_int_fast64_t
+ typedef atomic<int_fast64_t> atomic_int_fast64_t;
+
+ /// atomic_uint_fast64_t
+ typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
+#endif
+
+
+ /// atomic_intptr_t
+ typedef atomic<intptr_t> atomic_intptr_t;
+
+ /// atomic_uintptr_t
+ typedef atomic<uintptr_t> atomic_uintptr_t;
+
+ /// atomic_size_t
+ typedef atomic<size_t> atomic_size_t;
+
+ /// atomic_ptrdiff_t
+ typedef atomic<ptrdiff_t> atomic_ptrdiff_t;
+
+#ifdef _GLIBCXX_USE_C99_STDINT_TR1
+ /// atomic_intmax_t
+ typedef atomic<intmax_t> atomic_intmax_t;
+
+ /// atomic_uintmax_t
+ typedef atomic<uintmax_t> atomic_uintmax_t;
+#endif
+
+ // Function definitions, atomic_flag operations.
+ inline bool
+ atomic_flag_test_and_set_explicit(atomic_flag* __a,
+ memory_order __m) noexcept
+ { return __a->test_and_set(__m); }
+
+ inline bool
+ atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
+ memory_order __m) noexcept
+ { return __a->test_and_set(__m); }
+
+ inline void
+ atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept
+ { __a->clear(__m); }
+
+ inline void
+ atomic_flag_clear_explicit(volatile atomic_flag* __a,
+ memory_order __m) noexcept
+ { __a->clear(__m); }
+
+ inline bool
+ atomic_flag_test_and_set(atomic_flag* __a) noexcept
+ { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }
+
+ inline bool
+ atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept
+ { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }
+
+ inline void
+ atomic_flag_clear(atomic_flag* __a) noexcept
+ { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
+
+ inline void
+ atomic_flag_clear(volatile atomic_flag* __a) noexcept
+ { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
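These free functions mirror the C-style atomic_flag interface; the member functions test_and_set/clear would work equally well. A spin-lock sketch built on them, not part of the patch; `lock_flag`, `lock` and `unlock` are hypothetical names:

    #include <atomic>

    std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;

    void lock()
    {
      while (std::atomic_flag_test_and_set_explicit(&lock_flag,
                                                    std::memory_order_acquire))
        ;                                     // spin until the flag is cleared
    }

    void unlock()
    { std::atomic_flag_clear_explicit(&lock_flag, std::memory_order_release); }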
+
+
+ template<typename _Tp>
+ using __atomic_val_t = typename atomic<_Tp>::value_type;
+ template<typename _Tp>
+ using __atomic_diff_t = typename atomic<_Tp>::difference_type;
+
+ // [atomics.nonmembers] Non-member functions.
+ // Function templates generally applicable to atomic types.
+ template<typename _ITp>
+ inline bool
+ atomic_is_lock_free(const atomic<_ITp>* __a) noexcept
+ { return __a->is_lock_free(); }
+
+ template<typename _ITp>
+ inline bool
+ atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept
+ { return __a->is_lock_free(); }
+
+ template<typename _ITp>
+ inline void
+ atomic_init(atomic<_ITp>* __a, __atomic_val_t<_ITp> __i) noexcept
+ { __a->store(__i, memory_order_relaxed); }
+
+ template<typename _ITp>
+ inline void
+ atomic_init(volatile atomic<_ITp>* __a, __atomic_val_t<_ITp> __i) noexcept
+ { __a->store(__i, memory_order_relaxed); }
+
+ template<typename _ITp>
+ inline void
+ atomic_store_explicit(atomic<_ITp>* __a, __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { __a->store(__i, __m); }
+
+ template<typename _ITp>
+ inline void
+ atomic_store_explicit(volatile atomic<_ITp>* __a, __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { __a->store(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept
+ { return __a->load(__m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_load_explicit(const volatile atomic<_ITp>* __a,
+ memory_order __m) noexcept
+ { return __a->load(__m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_exchange_explicit(atomic<_ITp>* __a, __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->exchange(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_exchange_explicit(volatile atomic<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->exchange(__i, __m); }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2,
+ memory_order __m1,
+ memory_order __m2) noexcept
+ { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2,
+ memory_order __m1,
+ memory_order __m2) noexcept
+ { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2,
+ memory_order __m1,
+ memory_order __m2) noexcept
+ { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2,
+ memory_order __m1,
+ memory_order __m2) noexcept
+ { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
+
+
+ template<typename _ITp>
+ inline void
+ atomic_store(atomic<_ITp>* __a, __atomic_val_t<_ITp> __i) noexcept
+ { atomic_store_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline void
+ atomic_store(volatile atomic<_ITp>* __a, __atomic_val_t<_ITp> __i) noexcept
+ { atomic_store_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_load(const atomic<_ITp>* __a) noexcept
+ { return atomic_load_explicit(__a, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_load(const volatile atomic<_ITp>* __a) noexcept
+ { return atomic_load_explicit(__a, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_exchange(atomic<_ITp>* __a, __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_exchange(volatile atomic<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_weak(atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2) noexcept
+ {
+ return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
+ memory_order_seq_cst,
+ memory_order_seq_cst);
+ }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_weak(volatile atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2) noexcept
+ {
+ return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
+ memory_order_seq_cst,
+ memory_order_seq_cst);
+ }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_strong(atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2) noexcept
+ {
+ return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
+ memory_order_seq_cst,
+ memory_order_seq_cst);
+ }
+
+ template<typename _ITp>
+ inline bool
+ atomic_compare_exchange_strong(volatile atomic<_ITp>* __a,
+ __atomic_val_t<_ITp>* __i1,
+ __atomic_val_t<_ITp> __i2) noexcept
+ {
+ return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
+ memory_order_seq_cst,
+ memory_order_seq_cst);
+ }
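Unlike the member functions, the non-member compare-exchange forms take the expected value by pointer, matching the C interface, and update it with the observed value on failure. A sketch, not part of the patch; `value` and `try_claim` are hypothetical names:

    #include <atomic>

    std::atomic<int> value(0);

    bool try_claim(int desired)
    {
      int expected = 0;
      // On failure, expected is overwritten with the value actually seen.
      return std::atomic_compare_exchange_strong(&value, &expected, desired);
    }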
+
+ // Function templates for atomic_integral and atomic_pointer operations only.
+ // Some operations (and, or, xor) are only available for atomic integrals,
+ // which is implemented by taking a parameter of type __atomic_base<_ITp>*.
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_add_explicit(atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_add(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_add_explicit(volatile atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_add(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_sub_explicit(atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_sub(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_sub_explicit(volatile atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_sub(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_and_explicit(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_and(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_and(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_or_explicit(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_or(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_or(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_xor(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i,
+ memory_order __m) noexcept
+ { return __a->fetch_xor(__i, __m); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_add(atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i) noexcept
+ { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_add(volatile atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i) noexcept
+ { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_sub(atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i) noexcept
+ { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_sub(volatile atomic<_ITp>* __a,
+ __atomic_diff_t<_ITp> __i) noexcept
+ { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_and(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_and(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_or(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_or(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_xor(__atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
+
+ template<typename _ITp>
+ inline _ITp
+ atomic_fetch_xor(volatile __atomic_base<_ITp>* __a,
+ __atomic_val_t<_ITp> __i) noexcept
+ { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
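As the comment at the top of this group notes, the add/sub wrappers are declared for atomic<_ITp>* (so they also cover atomic pointers through difference_type), while and/or/xor take __atomic_base<_ITp>* and therefore only match the integral specializations. A sketch, not part of the patch; the two variables are hypothetical:

    #include <atomic>

    std::atomic<long>     counter(0);
    std::atomic<unsigned> flags(0);

    void update()
    {
      std::atomic_fetch_add(&counter, 1L);    // add/sub: any atomic with difference_type
      std::atomic_fetch_or(&flags, 0x4u);     // and/or/xor: integral atomics only
    }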
+
+#if __cplusplus > 201703L
+#define __cpp_lib_atomic_float 201711L
+ template<>
+ struct atomic<float> : __atomic_float<float>
+ {
+ atomic() noexcept = default;
+
+ constexpr
+ atomic(float __fp) noexcept : __atomic_float<float>(__fp)
+ { }
+
+ atomic& operator=(const atomic&) volatile = delete;
+ atomic& operator=(const atomic&) = delete;
+
+ using __atomic_float<float>::operator=;
+ };
+
+ template<>
+ struct atomic<double> : __atomic_float<double>
+ {
+ atomic() noexcept = default;
+
+ constexpr
+ atomic(double __fp) noexcept : __atomic_float<double>(__fp)
+ { }
+
+ atomic& operator=(const atomic&) volatile = delete;
+ atomic& operator=(const atomic&) = delete;
+
+ using __atomic_float<double>::operator=;
+ };
+
+ template<>
+ struct atomic<long double> : __atomic_float<long double>
+ {
+ atomic() noexcept = default;
+
+ constexpr
+ atomic(long double __fp) noexcept : __atomic_float<long double>(__fp)
+ { }
+
+ atomic& operator=(const atomic&) volatile = delete;
+ atomic& operator=(const atomic&) = delete;
+
+ using __atomic_float<long double>::operator=;
+ };
+
+#define __cpp_lib_atomic_ref 201806L
+
+ /// Class template to provide atomic operations on a non-atomic variable.
+ template<typename _Tp>
+ struct atomic_ref : __atomic_ref<_Tp>
+ {
+ explicit
+ atomic_ref(_Tp& __t) noexcept : __atomic_ref<_Tp>(__t)
+ { }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ atomic_ref(const atomic_ref&) = default;
+
+ using __atomic_ref<_Tp>::operator=;
+ };
+
+#endif // C++2a
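atomic_ref applies atomic operations to an ordinary, non-atomic object for as long as any atomic_ref bound to it exists. A sketch, not part of the patch and only valid when this header is compiled as C++2a; `plain_counter` and `bump` are hypothetical names:

    #include <atomic>

    long plain_counter = 0;                   // ordinary non-atomic object

    void bump()
    {
      // All concurrent access must go through atomic_ref objects while
      // any atomic_ref to plain_counter is alive.
      std::atomic_ref<long> ref(plain_counter);
      ref.fetch_add(1, std::memory_order_relaxed);
    }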
+
+ // @} group atomics
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+
+#endif // C++11
+
+#endif // _GLIBCXX_ATOMIC
diff --git a/include/bits/atomic_base.h b/include/bits/atomic_base.h
new file mode 100644
index 0000000..41d5928
--- /dev/null
+++ b/include/bits/atomic_base.h
@@ -0,0 +1,1703 @@
+// -*- C++ -*- header.
+
+// Copyright (C) 2008-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file bits/atomic_base.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{atomic}
+ */
+
+#ifndef _GLIBCXX_ATOMIC_BASE_H
+#define _GLIBCXX_ATOMIC_BASE_H 1
+
+#pragma GCC system_header
+
+#include <bits/c++config.h>
+#include <stdint.h>
+#include <bits/atomic_lockfree_defines.h>
+#include <bits/move.h>
+
+#ifndef _GLIBCXX_ALWAYS_INLINE
+#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
+#endif
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /**
+ * @defgroup atomics Atomics
+ *
+ * Components for performing atomic operations.
+ * @{
+ */
+
+ /// Enumeration for memory_order
+#if __cplusplus > 201703L
+ enum class memory_order : int
+ {
+ relaxed,
+ consume,
+ acquire,
+ release,
+ acq_rel,
+ seq_cst
+ };
+
+ inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
+ inline constexpr memory_order memory_order_consume = memory_order::consume;
+ inline constexpr memory_order memory_order_acquire = memory_order::acquire;
+ inline constexpr memory_order memory_order_release = memory_order::release;
+ inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
+ inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
+#else
+ typedef enum memory_order
+ {
+ memory_order_relaxed,
+ memory_order_consume,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst
+ } memory_order;
+#endif
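From C++20 memory_order is a scoped enumeration, but the unscoped memory_order_* names remain available as inline constexpr constants, so code spelled with the old names keeps compiling under both definitions above. A release/acquire pairing sketch, not part of the patch; the two variables are hypothetical:

    #include <atomic>

    std::atomic<int>  payload(0);
    std::atomic<bool> published(false);

    void writer()
    {
      payload.store(42, std::memory_order_relaxed);
      published.store(true, std::memory_order_release);  // publishes the payload
    }

    int reader()
    {
      while (!published.load(std::memory_order_acquire))
        ;
      return payload.load(std::memory_order_relaxed);    // observes 42
    }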
+
+ enum __memory_order_modifier
+ {
+ __memory_order_mask = 0x0ffff,
+ __memory_order_modifier_mask = 0xffff0000,
+ __memory_order_hle_acquire = 0x10000,
+ __memory_order_hle_release = 0x20000
+ };
+
+ constexpr memory_order
+ operator|(memory_order __m, __memory_order_modifier __mod)
+ {
+ return memory_order(int(__m) | int(__mod));
+ }
+
+ constexpr memory_order
+ operator&(memory_order __m, __memory_order_modifier __mod)
+ {
+ return memory_order(int(__m) & int(__mod));
+ }
+
+ // Drop release ordering as per [atomics.types.operations.req]/21
+ constexpr memory_order
+ __cmpexch_failure_order2(memory_order __m) noexcept
+ {
+ return __m == memory_order_acq_rel ? memory_order_acquire
+ : __m == memory_order_release ? memory_order_relaxed : __m;
+ }
+
+ constexpr memory_order
+ __cmpexch_failure_order(memory_order __m) noexcept
+ {
+ return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
+ | __memory_order_modifier(__m & __memory_order_modifier_mask));
+ }
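This helper computes the implicit failure ordering used by the single-order compare_exchange overloads, preserving any HLE modifier bits while downgrading acq_rel to acquire and release to relaxed. Illustrative checks, not part of the patch, relying only on the constexpr definitions above:

    static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
                  == std::memory_order_acquire, "acq_rel fails as acquire");
    static_assert(std::__cmpexch_failure_order(std::memory_order_release)
                  == std::memory_order_relaxed, "release fails as relaxed");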
+
+ _GLIBCXX_ALWAYS_INLINE void
+ atomic_thread_fence(memory_order __m) noexcept
+ { __atomic_thread_fence(int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ atomic_signal_fence(memory_order __m) noexcept
+ { __atomic_signal_fence(int(__m)); }
+
+ /// kill_dependency
+ template<typename _Tp>
+ inline _Tp
+ kill_dependency(_Tp __y) noexcept
+ {
+ _Tp __ret(__y);
+ return __ret;
+ }
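kill_dependency simply copies its argument through a local, which is what the standard requires: the returned value no longer carries a dependency from a memory_order_consume load. A sketch, not part of the patch; the published pointer and the reader are hypothetical:

    #include <atomic>

    struct Node { int payload; };
    std::atomic<Node*> head(nullptr);
    Node fallback{0};

    int read_payload()
    {
      Node* n = head.load(std::memory_order_consume);
      if (!n)
        n = &fallback;
      // Uses of kill_dependency(n) are not dependency-ordered after the
      // consume load above.
      return std::kill_dependency(n)->payload;
    }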
+
+
+ // Base types for atomics.
+ template<typename _ITp>
+ struct __atomic_base;
+
+#if __cplusplus <= 201703L
+# define _GLIBCXX20_INIT(I)
+#else
+# define __cpp_lib_atomic_value_initialization 201911L
+# define _GLIBCXX20_INIT(I) = I
+#endif
+
+#define ATOMIC_VAR_INIT(_VI) { _VI }
+
+ template<typename _Tp>
+ struct atomic;
+
+ template<typename _Tp>
+ struct atomic<_Tp*>;
+
+ /* The target's "set" value for test-and-set may not be exactly 1. */
+#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
+ typedef bool __atomic_flag_data_type;
+#else
+ typedef unsigned char __atomic_flag_data_type;
+#endif
+
+ /**
+ * @brief Base type for atomic_flag.
+ *
+ * Base type is POD with data, allowing atomic_flag to derive from
+ * it and meet the standard layout type requirement. In addition to
+ * compatibility with a C interface, this allows different
+ * implementations of atomic_flag to use the same atomic operation
+ * functions, via a standard conversion to the __atomic_flag_base
+ * argument.
+ */
+ _GLIBCXX_BEGIN_EXTERN_C
+
+ struct __atomic_flag_base
+ {
+ __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
+ };
+
+ _GLIBCXX_END_EXTERN_C
+
+#define ATOMIC_FLAG_INIT { 0 }
+
+ /// atomic_flag
+ struct atomic_flag : public __atomic_flag_base
+ {
+ atomic_flag() noexcept = default;
+ ~atomic_flag() noexcept = default;
+ atomic_flag(const atomic_flag&) = delete;
+ atomic_flag& operator=(const atomic_flag&) = delete;
+ atomic_flag& operator=(const atomic_flag&) volatile = delete;
+
+ // Conversion to ATOMIC_FLAG_INIT.
+ constexpr atomic_flag(bool __i) noexcept
+ : __atomic_flag_base{ _S_init(__i) }
+ { }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ test_and_set(memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return __atomic_test_and_set (&_M_i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_test_and_set (&_M_i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ clear(memory_order __m = memory_order_seq_cst) noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_consume);
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+
+ __atomic_clear (&_M_i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ clear(memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_consume);
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+
+ __atomic_clear (&_M_i, int(__m));
+ }
+
+ private:
+ static constexpr __atomic_flag_data_type
+ _S_init(bool __i)
+ { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
+ };
+
+
+ /// Base class for atomic integrals.
+ //
+ // For each of the integral types, define atomic_[integral type] struct
+ //
+ // atomic_bool bool
+ // atomic_char char
+ // atomic_schar signed char
+ // atomic_uchar unsigned char
+ // atomic_short short
+ // atomic_ushort unsigned short
+ // atomic_int int
+ // atomic_uint unsigned int
+ // atomic_long long
+ // atomic_ulong unsigned long
+ // atomic_llong long long
+ // atomic_ullong unsigned long long
+ // atomic_char8_t char8_t
+ // atomic_char16_t char16_t
+ // atomic_char32_t char32_t
+ // atomic_wchar_t wchar_t
+ //
+ // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
+ // 8 bytes, since that is what GCC built-in functions for atomic
+ // memory access expect.
+ template<typename _ITp>
+ struct __atomic_base
+ {
+ using value_type = _ITp;
+ using difference_type = value_type;
+
+ private:
+ typedef _ITp __int_type;
+
+ static constexpr int _S_alignment =
+ sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
+
+ alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
+
+ public:
+ __atomic_base() noexcept = default;
+ ~__atomic_base() noexcept = default;
+ __atomic_base(const __atomic_base&) = delete;
+ __atomic_base& operator=(const __atomic_base&) = delete;
+ __atomic_base& operator=(const __atomic_base&) volatile = delete;
+
+ // Requires __int_type convertible to _M_i.
+ constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
+
+ operator __int_type() const noexcept
+ { return load(); }
+
+ operator __int_type() const volatile noexcept
+ { return load(); }
+
+ __int_type
+ operator=(__int_type __i) noexcept
+ {
+ store(__i);
+ return __i;
+ }
+
+ __int_type
+ operator=(__int_type __i) volatile noexcept
+ {
+ store(__i);
+ return __i;
+ }
+
+ __int_type
+ operator++(int) noexcept
+ { return fetch_add(1); }
+
+ __int_type
+ operator++(int) volatile noexcept
+ { return fetch_add(1); }
+
+ __int_type
+ operator--(int) noexcept
+ { return fetch_sub(1); }
+
+ __int_type
+ operator--(int) volatile noexcept
+ { return fetch_sub(1); }
+
+ __int_type
+ operator++() noexcept
+ { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator++() volatile noexcept
+ { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator--() noexcept
+ { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator--() volatile noexcept
+ { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator+=(__int_type __i) noexcept
+ { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator+=(__int_type __i) volatile noexcept
+ { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator-=(__int_type __i) noexcept
+ { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator-=(__int_type __i) volatile noexcept
+ { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator&=(__int_type __i) noexcept
+ { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator&=(__int_type __i) volatile noexcept
+ { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator|=(__int_type __i) noexcept
+ { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator|=(__int_type __i) volatile noexcept
+ { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator^=(__int_type __i) noexcept
+ { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ __int_type
+ operator^=(__int_type __i) volatile noexcept
+ { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
+
+ bool
+ is_lock_free() const noexcept
+ {
+ // Use a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(sizeof(_M_i),
+ reinterpret_cast<void *>(-_S_alignment));
+ }
+
+ bool
+ is_lock_free() const volatile noexcept
+ {
+ // Use a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(sizeof(_M_i),
+ reinterpret_cast<void *>(-_S_alignment));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
+
+ __atomic_store_n(&_M_i, __i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ store(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
+
+ __atomic_store_n(&_M_i, __i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+
+ return __atomic_load_n(&_M_i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+
+ return __atomic_load_n(&_M_i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ exchange(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return __atomic_exchange_n(&_M_i, __i, int(__m));
+ }
+
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ exchange(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_exchange_n(&_M_i, __i, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_weak(__int_type& __i1, __int_type __i2,
+ memory_order __m1, memory_order __m2) noexcept
+ {
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
+
+ return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
+ int(__m1), int(__m2));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_weak(__int_type& __i1, __int_type __i2,
+ memory_order __m1,
+ memory_order __m2) volatile noexcept
+ {
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
+
+ return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
+ int(__m1), int(__m2));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_weak(__int_type& __i1, __int_type __i2,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return compare_exchange_weak(__i1, __i2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_weak(__int_type& __i1, __int_type __i2,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return compare_exchange_weak(__i1, __i2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(__int_type& __i1, __int_type __i2,
+ memory_order __m1, memory_order __m2) noexcept
+ {
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
+
+ return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
+ int(__m1), int(__m2));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(__int_type& __i1, __int_type __i2,
+ memory_order __m1,
+ memory_order __m2) volatile noexcept
+ {
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
+
+ return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
+ int(__m1), int(__m2));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(__int_type& __i1, __int_type __i2,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return compare_exchange_strong(__i1, __i2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(__int_type& __i1, __int_type __i2,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return compare_exchange_strong(__i1, __i2, __m,
+ __cmpexch_failure_order(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_add(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_add(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_sub(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_sub(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_and(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_and(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_or(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_or(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_xor(__int_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __int_type
+ fetch_xor(__int_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
+ };
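For illustration, a minimal usage sketch (not part of the patch) of the fetch_and/fetch_or operations defined above, exercised through the public std::atomic<unsigned> interface that forwards to this base; every fetch-op returns the value held before the update:

    #include <atomic>
    #include <cassert>

    int main()
    {
      std::atomic<unsigned> flags{0};
      unsigned prev = flags.fetch_or(0x4);   // set bit 2; returns the old value (0)
      assert(prev == 0 && flags.load() == 0x4);
      prev = flags.fetch_and(~0x4u);         // clear bit 2; returns 0x4
      assert(prev == 0x4 && flags.load() == 0);
    }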
+
+
+ /// Partial specialization for pointer types.
+ template<typename _PTp>
+ struct __atomic_base<_PTp*>
+ {
+ private:
+ typedef _PTp* __pointer_type;
+
+ __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
+
+ // Factored out to facilitate explicit specialization.
+ constexpr ptrdiff_t
+ _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
+
+ constexpr ptrdiff_t
+ _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
+
+ public:
+ __atomic_base() noexcept = default;
+ ~__atomic_base() noexcept = default;
+ __atomic_base(const __atomic_base&) = delete;
+ __atomic_base& operator=(const __atomic_base&) = delete;
+ __atomic_base& operator=(const __atomic_base&) volatile = delete;
+
+ // Requires __pointer_type convertible to _M_p.
+ constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
+
+ operator __pointer_type() const noexcept
+ { return load(); }
+
+ operator __pointer_type() const volatile noexcept
+ { return load(); }
+
+ __pointer_type
+ operator=(__pointer_type __p) noexcept
+ {
+ store(__p);
+ return __p;
+ }
+
+ __pointer_type
+ operator=(__pointer_type __p) volatile noexcept
+ {
+ store(__p);
+ return __p;
+ }
+
+ __pointer_type
+ operator++(int) noexcept
+ { return fetch_add(1); }
+
+ __pointer_type
+ operator++(int) volatile noexcept
+ { return fetch_add(1); }
+
+ __pointer_type
+ operator--(int) noexcept
+ { return fetch_sub(1); }
+
+ __pointer_type
+ operator--(int) volatile noexcept
+ { return fetch_sub(1); }
+
+ __pointer_type
+ operator++() noexcept
+ { return __atomic_add_fetch(&_M_p, _M_type_size(1),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator++() volatile noexcept
+ { return __atomic_add_fetch(&_M_p, _M_type_size(1),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator--() noexcept
+ { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator--() volatile noexcept
+ { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator+=(ptrdiff_t __d) noexcept
+ { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator+=(ptrdiff_t __d) volatile noexcept
+ { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator-=(ptrdiff_t __d) noexcept
+ { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
+ int(memory_order_seq_cst)); }
+
+ __pointer_type
+ operator-=(ptrdiff_t __d) volatile noexcept
+ { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
+ int(memory_order_seq_cst)); }
+
+ bool
+ is_lock_free() const noexcept
+ {
+ // Produce a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(sizeof(_M_p),
+ reinterpret_cast<void *>(-__alignof(_M_p)));
+ }
+
+ bool
+ is_lock_free() const volatile noexcept
+ {
+ // Produce a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(sizeof(_M_p),
+ reinterpret_cast<void *>(-__alignof(_M_p)));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ store(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
+
+ __atomic_store_n(&_M_p, __p, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE void
+ store(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_acquire);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+ __glibcxx_assert(__b != memory_order_consume);
+
+ __atomic_store_n(&_M_p, __p, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+
+ return __atomic_load_n(&_M_p, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+ {
+ memory_order __b = __m & __memory_order_mask;
+ __glibcxx_assert(__b != memory_order_release);
+ __glibcxx_assert(__b != memory_order_acq_rel);
+
+ return __atomic_load_n(&_M_p, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ exchange(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) noexcept
+ {
+ return __atomic_exchange_n(&_M_p, __p, int(__m));
+ }
+
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ exchange(__pointer_type __p,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ {
+ return __atomic_exchange_n(&_M_p, __p, int(__m));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m1,
+ memory_order __m2) noexcept
+ {
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
+
+ return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
+ int(__m1), int(__m2));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+ memory_order __m1,
+ memory_order __m2) volatile noexcept
+ {
+ memory_order __b2 = __m2 & __memory_order_mask;
+ memory_order __b1 = __m1 & __memory_order_mask;
+
+ __glibcxx_assert(__b2 != memory_order_release);
+ __glibcxx_assert(__b2 != memory_order_acq_rel);
+ __glibcxx_assert(__b2 <= __b1);
+
+ return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
+ int(__m1), int(__m2));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ fetch_add(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ fetch_add(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ fetch_sub(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
+
+ _GLIBCXX_ALWAYS_INLINE __pointer_type
+ fetch_sub(ptrdiff_t __d,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
+ };
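The pointer specialization scales every ptrdiff_t argument by sizeof(_PTp) via _M_type_size, so atomic pointer arithmetic behaves exactly like ordinary pointer arithmetic. A minimal sketch (not part of the patch), using the public std::atomic<int*> interface built on this base:

    #include <atomic>
    #include <cassert>

    int main()
    {
      int buf[4] = {0, 1, 2, 3};
      std::atomic<int*> p{buf};
      int* old = p.fetch_add(2);   // returns buf, stores buf + 2
      assert(old == buf && p.load() == buf + 2);
      --p;                         // pre-decrement yields buf + 1
      assert(p.load() == buf + 1);
    }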
+
+#if __cplusplus > 201703L
+ // Implementation details of atomic_ref and atomic.
+ namespace __atomic_impl
+ {
+ // Remove volatile and create a non-deduced context for value arguments.
+ template<typename _Tp>
+ using _Val = remove_volatile_t<_Tp>;
+
+ // As above, but for difference_type arguments.
+ template<typename _Tp>
+ using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
+
+ template<size_t _Size, size_t _Align>
+ _GLIBCXX_ALWAYS_INLINE bool
+ is_lock_free() noexcept
+ {
+ // Produce a fake, minimally aligned pointer.
+ return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
+ }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE void
+ store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
+ { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
+ load(const _Tp* __ptr, memory_order __m) noexcept
+ {
+ alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+ auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
+ __atomic_load(__ptr, __dest, int(__m));
+ return *__dest;
+ }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
+ exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
+ {
+ alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
+ auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
+ __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
+ return *__dest;
+ }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
+ _Val<_Tp> __desired, memory_order __success,
+ memory_order __failure) noexcept
+ {
+ return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
+ std::__addressof(__desired), true,
+ int(__success), int(__failure));
+ }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE bool
+ compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
+ _Val<_Tp> __desired, memory_order __success,
+ memory_order __failure) noexcept
+ {
+ return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
+ std::__addressof(__desired), false,
+ int(__success), int(__failure));
+ }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
+ { return __atomic_fetch_add(__ptr, __i, int(__m)); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
+ { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ { return __atomic_fetch_and(__ptr, __i, int(__m)); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ { return __atomic_fetch_or(__ptr, __i, int(__m)); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
+ { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
+ { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+ { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+ { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+ template<typename _Tp>
+ _GLIBCXX_ALWAYS_INLINE _Tp
+ __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
+ { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
+
+ template<typename _Tp>
+ _Tp
+ __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ {
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval + __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+ memory_order_relaxed))
+ __newval = __oldval + __i;
+ return __oldval;
+ }
+
+ template<typename _Tp>
+ _Tp
+ __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
+ {
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval - __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
+ memory_order_relaxed))
+ __newval = __oldval - __i;
+ return __oldval;
+ }
+
+ template<typename _Tp>
+ _Tp
+ __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
+ {
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval + __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval,
+ memory_order_seq_cst,
+ memory_order_relaxed))
+ __newval = __oldval + __i;
+ return __newval;
+ }
+
+ template<typename _Tp>
+ _Tp
+ __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
+ {
+ _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
+ _Val<_Tp> __newval = __oldval - __i;
+ while (!compare_exchange_weak(__ptr, __oldval, __newval,
+ memory_order_seq_cst,
+ memory_order_relaxed))
+ __newval = __oldval - __i;
+ return __newval;
+ }
+ } // namespace __atomic_impl
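The __fetch_add_flt/__fetch_sub_flt helpers above emulate floating-point fetch-operations with a compare_exchange_weak loop, since there is no native __atomic_fetch_add for floating-point types; on failure the CAS itself reloads __oldval, so only the sum has to be recomputed. The same pattern at user level (a sketch, not part of the patch):

    #include <atomic>

    double fetch_add(std::atomic<double>& a, double i,
                     std::memory_order m = std::memory_order_seq_cst)
    {
      double oldval = a.load(std::memory_order_relaxed);
      double newval = oldval + i;
      while (!a.compare_exchange_weak(oldval, newval, m,
                                      std::memory_order_relaxed))
        newval = oldval + i;   // oldval was refreshed by the failed CAS
      return oldval;
    }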
+
+ // base class for atomic<floating-point-type>
+ template<typename _Fp>
+ struct __atomic_float
+ {
+ static_assert(is_floating_point_v<_Fp>);
+
+ static constexpr size_t _S_alignment = __alignof__(_Fp);
+
+ public:
+ using value_type = _Fp;
+ using difference_type = value_type;
+
+ static constexpr bool is_always_lock_free
+ = __atomic_always_lock_free(sizeof(_Fp), 0);
+
+ __atomic_float() = default;
+
+ constexpr
+ __atomic_float(_Fp __t) : _M_fp(__t)
+ { }
+
+ __atomic_float(const __atomic_float&) = delete;
+ __atomic_float& operator=(const __atomic_float&) = delete;
+ __atomic_float& operator=(const __atomic_float&) volatile = delete;
+
+ _Fp
+ operator=(_Fp __t) volatile noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ _Fp
+ operator=(_Fp __t) noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ bool
+ is_lock_free() const volatile noexcept
+ { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
+
+ bool
+ is_lock_free() const noexcept
+ { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
+
+ void
+ store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
+ { __atomic_impl::store(&_M_fp, __t, __m); }
+
+ void
+ store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
+ { __atomic_impl::store(&_M_fp, __t, __m); }
+
+ _Fp
+ load(memory_order __m = memory_order_seq_cst) const volatile noexcept
+ { return __atomic_impl::load(&_M_fp, __m); }
+
+ _Fp
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::load(&_M_fp, __m); }
+
+ operator _Fp() const volatile noexcept { return this->load(); }
+ operator _Fp() const noexcept { return this->load(); }
+
+ _Fp
+ exchange(_Fp __desired,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
+
+ _Fp
+ exchange(_Fp __desired,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
+
+ bool
+ compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ memory_order __success,
+ memory_order __failure) noexcept
+ {
+ return __atomic_impl::compare_exchange_weak(&_M_fp,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ memory_order __success,
+ memory_order __failure) volatile noexcept
+ {
+ return __atomic_impl::compare_exchange_weak(&_M_fp,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ memory_order __success,
+ memory_order __failure) noexcept
+ {
+ return __atomic_impl::compare_exchange_strong(&_M_fp,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ memory_order __success,
+ memory_order __failure) volatile noexcept
+ {
+ return __atomic_impl::compare_exchange_strong(&_M_fp,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ memory_order __order = memory_order_seq_cst)
+ noexcept
+ {
+ return compare_exchange_weak(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ memory_order __order = memory_order_seq_cst)
+ volatile noexcept
+ {
+ return compare_exchange_weak(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ memory_order __order = memory_order_seq_cst)
+ noexcept
+ {
+ return compare_exchange_strong(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ memory_order __order = memory_order_seq_cst)
+ volatile noexcept
+ {
+ return compare_exchange_strong(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ value_type
+ fetch_add(value_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
+
+ value_type
+ fetch_add(value_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
+
+ value_type
+ fetch_sub(value_type __i,
+ memory_order __m = memory_order_seq_cst) noexcept
+ { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
+
+ value_type
+ fetch_sub(value_type __i,
+ memory_order __m = memory_order_seq_cst) volatile noexcept
+ { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
+
+ value_type
+ operator+=(value_type __i) noexcept
+ { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
+
+ value_type
+ operator+=(value_type __i) volatile noexcept
+ { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
+
+ value_type
+ operator-=(value_type __i) noexcept
+ { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
+
+ value_type
+ operator-=(value_type __i) volatile noexcept
+ { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
+
+ private:
+ alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
+ };
+#undef _GLIBCXX20_INIT
+
+ template<typename _Tp, bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
+ struct __atomic_ref;
+
+ // base class for non-integral, non-floating-point, non-pointer types
+ template<typename _Tp>
+ struct __atomic_ref<_Tp, false, false>
+ {
+ static_assert(is_trivially_copyable_v<_Tp>);
+
+ // 1/2/4/8/16-byte types must be aligned to at least their size.
+ static constexpr int _S_min_alignment
+ = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
+ ? 0 : sizeof(_Tp);
+
+ public:
+ using value_type = _Tp;
+
+ static constexpr bool is_always_lock_free
+ = __atomic_always_lock_free(sizeof(_Tp), 0);
+
+ static constexpr size_t required_alignment
+ = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
+
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
+ { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+ __atomic_ref(const __atomic_ref&) noexcept = default;
+
+ _Tp
+ operator=(_Tp __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ operator _Tp() const noexcept { return this->load(); }
+
+ bool
+ is_lock_free() const noexcept
+ { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
+
+ void
+ store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(_M_ptr, __t, __m); }
+
+ _Tp
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::load(_M_ptr, __m); }
+
+ _Tp
+ exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
+ const noexcept
+ { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+ bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_weak(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_strong(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_weak(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_strong(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ private:
+ _Tp* _M_ptr;
+ };
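As the comment above notes, required_alignment may exceed the type's natural alignment so that the referenced object can be operated on lock-free. A sketch (not part of the patch), assuming the public std::atomic_ref wrapper (defined elsewhere in the library) over this base and a C++20 compiler; Pair is a hypothetical 8-byte trivially copyable type:

    #include <atomic>

    struct Pair { int a, b; };   // 8 bytes; alignof(Pair) == 4 on typical targets

    // With this implementation required_alignment is 8: the greater of
    // _S_min_alignment (sizeof(Pair)) and alignof(Pair).
    alignas(std::atomic_ref<Pair>::required_alignment) Pair shared{0, 0};

    void bump_both()
    {
      std::atomic_ref<Pair> ref(shared);
      Pair expected = ref.load();
      Pair desired{expected.a + 1, expected.b + 1};
      while (!ref.compare_exchange_weak(expected, desired))
        desired = {expected.a + 1, expected.b + 1};
    }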
+
+ // base class for atomic_ref<integral-type>
+ template<typename _Tp>
+ struct __atomic_ref<_Tp, true, false>
+ {
+ static_assert(is_integral_v<_Tp>);
+
+ public:
+ using value_type = _Tp;
+ using difference_type = value_type;
+
+ static constexpr bool is_always_lock_free
+ = __atomic_always_lock_free(sizeof(_Tp), 0);
+
+ static constexpr size_t required_alignment
+ = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp& __t) : _M_ptr(&__t)
+ { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+ __atomic_ref(const __atomic_ref&) noexcept = default;
+
+ _Tp
+ operator=(_Tp __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ operator _Tp() const noexcept { return this->load(); }
+
+ bool
+ is_lock_free() const noexcept
+ {
+ return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
+ }
+
+ void
+ store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(_M_ptr, __t, __m); }
+
+ _Tp
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::load(_M_ptr, __m); }
+
+ _Tp
+ exchange(_Tp __desired,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+ bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_weak(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_strong(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_weak(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_strong(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ value_type
+ fetch_add(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
+
+ value_type
+ fetch_sub(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
+
+ value_type
+ fetch_and(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
+
+ value_type
+ fetch_or(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
+
+ value_type
+ fetch_xor(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
+
+ _GLIBCXX_ALWAYS_INLINE value_type
+ operator++(int) const noexcept
+ { return fetch_add(1); }
+
+ _GLIBCXX_ALWAYS_INLINE value_type
+ operator--(int) const noexcept
+ { return fetch_sub(1); }
+
+ value_type
+ operator++() const noexcept
+ { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
+
+ value_type
+ operator--() const noexcept
+ { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
+
+ value_type
+ operator+=(value_type __i) const noexcept
+ { return __atomic_impl::__add_fetch(_M_ptr, __i); }
+
+ value_type
+ operator-=(value_type __i) const noexcept
+ { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
+
+ value_type
+ operator&=(value_type __i) const noexcept
+ { return __atomic_impl::__and_fetch(_M_ptr, __i); }
+
+ value_type
+ operator|=(value_type __i) const noexcept
+ { return __atomic_impl::__or_fetch(_M_ptr, __i); }
+
+ value_type
+ operator^=(value_type __i) const noexcept
+ { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
+
+ private:
+ _Tp* _M_ptr;
+ };
+
+ // base class for atomic_ref<floating-point-type>
+ template<typename _Fp>
+ struct __atomic_ref<_Fp, false, true>
+ {
+ static_assert(is_floating_point_v<_Fp>);
+
+ public:
+ using value_type = _Fp;
+ using difference_type = value_type;
+
+ static constexpr bool is_always_lock_free
+ = __atomic_always_lock_free(sizeof(_Fp), 0);
+
+ static constexpr size_t required_alignment = __alignof__(_Fp);
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Fp& __t) : _M_ptr(&__t)
+ { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+ __atomic_ref(const __atomic_ref&) noexcept = default;
+
+ _Fp
+ operator=(_Fp __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ operator _Fp() const noexcept { return this->load(); }
+
+ bool
+ is_lock_free() const noexcept
+ {
+ return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
+ }
+
+ void
+ store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(_M_ptr, __t, __m); }
+
+ _Fp
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::load(_M_ptr, __m); }
+
+ _Fp
+ exchange(_Fp __desired,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+ bool
+ compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_weak(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_strong(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_weak(_Fp& __expected, _Fp __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_weak(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_strong(_Fp& __expected, _Fp __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_strong(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ value_type
+ fetch_add(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
+
+ value_type
+ fetch_sub(value_type __i,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
+
+ value_type
+ operator+=(value_type __i) const noexcept
+ { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
+
+ value_type
+ operator-=(value_type __i) const noexcept
+ { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
+
+ private:
+ _Fp* _M_ptr;
+ };
+
+ // base class for atomic_ref<pointer-type>
+ template<typename _Tp>
+ struct __atomic_ref<_Tp*, false, false>
+ {
+ public:
+ using value_type = _Tp*;
+ using difference_type = ptrdiff_t;
+
+ static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
+
+ static constexpr size_t required_alignment = __alignof__(_Tp*);
+
+ __atomic_ref() = delete;
+ __atomic_ref& operator=(const __atomic_ref&) = delete;
+
+ explicit
+ __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
+ { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
+
+ __atomic_ref(const __atomic_ref&) noexcept = default;
+
+ _Tp*
+ operator=(_Tp* __t) const noexcept
+ {
+ this->store(__t);
+ return __t;
+ }
+
+ operator _Tp*() const noexcept { return this->load(); }
+
+ bool
+ is_lock_free() const noexcept
+ {
+ return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
+ }
+
+ void
+ store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
+ { __atomic_impl::store(_M_ptr, __t, __m); }
+
+ _Tp*
+ load(memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::load(_M_ptr, __m); }
+
+ _Tp*
+ exchange(_Tp* __desired,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
+
+ bool
+ compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_weak(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
+ memory_order __success,
+ memory_order __failure) const noexcept
+ {
+ return __atomic_impl::compare_exchange_strong(_M_ptr,
+ __expected, __desired,
+ __success, __failure);
+ }
+
+ bool
+ compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_weak(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ bool
+ compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
+ memory_order __order = memory_order_seq_cst)
+ const noexcept
+ {
+ return compare_exchange_strong(__expected, __desired, __order,
+ __cmpexch_failure_order(__order));
+ }
+
+ _GLIBCXX_ALWAYS_INLINE value_type
+ fetch_add(difference_type __d,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
+
+ _GLIBCXX_ALWAYS_INLINE value_type
+ fetch_sub(difference_type __d,
+ memory_order __m = memory_order_seq_cst) const noexcept
+ { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
+
+ value_type
+ operator++(int) const noexcept
+ { return fetch_add(1); }
+
+ value_type
+ operator--(int) const noexcept
+ { return fetch_sub(1); }
+
+ value_type
+ operator++() const noexcept
+ {
+ return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
+ }
+
+ value_type
+ operator--() const noexcept
+ {
+ return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
+ }
+
+ value_type
+ operator+=(difference_type __d) const noexcept
+ {
+ return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
+ }
+
+ value_type
+ operator-=(difference_type __d) const noexcept
+ {
+ return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
+ }
+
+ private:
+ static constexpr ptrdiff_t
+ _S_type_size(ptrdiff_t __d) noexcept
+ {
+ static_assert(is_object_v<_Tp>);
+ return __d * sizeof(_Tp);
+ }
+
+ _Tp** _M_ptr;
+ };
+
+#endif // C++2a
+
+ // @} group atomics
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace std
+
+#endif
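For reference, a sketch (not part of the patch) of the C++20 floating-point atomics that __atomic_float enables, assuming the atomic<double> specialization earlier in this header and compilation as C++20; fetch_add returns the previous value, while operator+= (via __add_fetch_flt) returns the updated one:

    #include <atomic>
    #include <cassert>

    int main()
    {
      std::atomic<double> d{1.0};
      double old = d.fetch_add(0.5);   // old == 1.0, d now holds 1.5
      double now = (d += 0.5);         // now == 2.0
      assert(old == 1.0 && now == 2.0);
    }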
diff --git a/include/bits/atomic_lockfree_defines.h b/include/bits/atomic_lockfree_defines.h
new file mode 100644
index 0000000..f0b57ae
--- /dev/null
+++ b/include/bits/atomic_lockfree_defines.h
@@ -0,0 +1,66 @@
+// -*- C++ -*- header.
+
+// Copyright (C) 2008-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file bits/atomic_lockfree_defines.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{atomic}
+ */
+
+#ifndef _GLIBCXX_ATOMIC_LOCK_FREE_H
+#define _GLIBCXX_ATOMIC_LOCK_FREE_H 1
+
+#pragma GCC system_header
+
+/**
+ * @addtogroup atomics
+ * @{
+ */
+
+/**
+ * Lock-free property.
+ *
+ * 0 indicates that the types are never lock-free.
+ * 1 indicates that the types are sometimes lock-free.
+ * 2 indicates that the types are always lock-free.
+ */
+
+#if __cplusplus >= 201103L
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#ifdef _GLIBCXX_USE_CHAR8_T
+#define ATOMIC_CHAR8_T_LOCK_FREE __GCC_ATOMIC_CHAR8_T_LOCK_FREE
+#endif
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#endif
+
+// @} group atomics
+
+#endif
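A sketch (not part of the patch) of how these macros are typically consulted: a value of 2 selects an always-lock-free path at compile time, 1 requires a per-object runtime check, and 0 means the type is never lock-free:

    #include <atomic>
    #include <cstdio>

    int main()
    {
    #if ATOMIC_INT_LOCK_FREE == 2
      std::printf("atomic<int> is always lock-free\n");
    #elif ATOMIC_INT_LOCK_FREE == 1
      std::atomic<int> x{0};
      std::printf("atomic<int> lock-free for this object: %d\n", (int)x.is_lock_free());
    #else
      std::printf("atomic<int> is never lock-free\n");
    #endif
    }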
diff --git a/include/ext/atomicity.h b/include/ext/atomicity.h
new file mode 100644
index 0000000..581e0c9
--- /dev/null
+++ b/include/ext/atomicity.h
@@ -0,0 +1,116 @@
+// Support for atomic operations -*- C++ -*-
+
+// Copyright (C) 2004-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file ext/atomicity.h
+ * This file is a GNU extension to the Standard C++ Library.
+ */
+
+#ifndef _GLIBCXX_ATOMICITY_H
+#define _GLIBCXX_ATOMICITY_H 1
+
+#pragma GCC system_header
+
+#include <bits/c++config.h>
+#include <bits/gthr.h>
+
+typedef int _Atomic_word;
+
+namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ // Functions for portable atomic access.
+ // To abstract locking primitives across all thread policies, use:
+ // __exchange_and_add_dispatch
+ // __atomic_add_dispatch
+#ifdef _GLIBCXX_ATOMIC_BUILTINS
+ inline _Atomic_word
+ __attribute__((__always_inline__))
+ __exchange_and_add(volatile _Atomic_word* __mem, int __val)
+ { return __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
+
+ inline void
+ __attribute__((__always_inline__))
+ __atomic_add(volatile _Atomic_word* __mem, int __val)
+ { __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
+#else
+ _Atomic_word
+ __exchange_and_add(volatile _Atomic_word*, int) _GLIBCXX_NOTHROW;
+
+ void
+ __atomic_add(volatile _Atomic_word*, int) _GLIBCXX_NOTHROW;
+#endif
+
+ inline _Atomic_word
+ __attribute__((__always_inline__))
+ __exchange_and_add_single(_Atomic_word* __mem, int __val)
+ {
+ _Atomic_word __result = *__mem;
+ *__mem += __val;
+ return __result;
+ }
+
+ inline void
+ __attribute__((__always_inline__))
+ __atomic_add_single(_Atomic_word* __mem, int __val)
+ { *__mem += __val; }
+
+ inline _Atomic_word
+ __attribute__ ((__always_inline__))
+ __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
+ {
+#ifdef __GTHREADS
+ if (__gthread_active_p())
+ return __exchange_and_add(__mem, __val);
+#endif
+ return __exchange_and_add_single(__mem, __val);
+ }
+
+ inline void
+ __attribute__ ((__always_inline__))
+ __atomic_add_dispatch(_Atomic_word* __mem, int __val)
+ {
+#ifdef __GTHREADS
+ if (__gthread_active_p())
+ {
+ __atomic_add(__mem, __val);
+ return;
+ }
+#endif
+ __atomic_add_single(__mem, __val);
+ }
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+
+// Even if the CPU doesn't need a memory barrier, we need to ensure
+// that the compiler doesn't reorder memory accesses across the
+// barriers.
+#ifndef _GLIBCXX_READ_MEM_BARRIER
+#define _GLIBCXX_READ_MEM_BARRIER __atomic_thread_fence (__ATOMIC_ACQUIRE)
+#endif
+#ifndef _GLIBCXX_WRITE_MEM_BARRIER
+#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence (__ATOMIC_RELEASE)
+#endif
+
+#endif
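A sketch (not part of the patch) of the intended use of the dispatch helpers: an intrusive reference count that takes the cheap non-atomic path when __gthread_active_p() reports a single-threaded program:

    #include <ext/atomicity.h>

    struct RefCounted
    {
      _Atomic_word count = 1;

      void acquire()
      { __gnu_cxx::__atomic_add_dispatch(&count, 1); }

      bool release()   // true when the last reference has been dropped
      { return __gnu_cxx::__exchange_and_add_dispatch(&count, -1) == 1; }
    };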
diff --git a/include/memory b/include/memory
index ce5503d..0ed1a86 100644
--- a/include/memory
+++ b/include/memory
@@ -69,6 +69,7 @@
#include
#if __cplusplus >= 201103L
+# include
# include
# include // std::less
# include