No description provided.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

Atomic.h 28KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491
  1. #pragma once
  2. // Baselib C++ Atomics
  3. // Interface that sticks closely to std::atomic (as of C++11 for the most part)
  4. // See: https://en.cppreference.com/w/cpp/atomic/atomic
  5. //
  6. // ATTENTION: For free functions (not using the baselib::atomic struct), the caller must ensure that the address of obj is aligned to the size of its respective target. (e.g. 64bit values need to be aligned to 8 bytes)
  7. // Failure to comply is undefined behavior (in practice depending on compiler & architecture either crash, non-atomicity or slow performance)
  8. // ALIGN_ATOMIC can be used to ensure this, but generally it is recommended to use the baselib::atomic struct instead!
  9. //
  10. // Forwards internally to C atomic header Baselib_Atomic.h (see also Baselib_Atomic_TypeSafe.h for typesafe C atomics)
  11. // -----------------------------------------------------------------------------------------------------------------------------------------
  12. // Differences to C++11 atomics:
  13. //
  14. // * free functions that operate on types other than baselib::atomic
  15. // * baselib::atomic allows access to its internal value
  16. // * no zero initialization on baselib::atomic
  17. // * guaranteed to be lock-free* (optional for std::atomic)
  18. // * restricts object size from sizeof(char) to 2 * sizeof(void*)
  19. // * (*) unless build is configured correctly, some platforms may forward to libatomic, which may cause locking and prohibit inlining
  20. // * compare_exchange
  21. // * no single parameter versions
  22. // * compare_exchange is not allowed with (success == memory_order_release && failure == memory_order_acquire)
  23. // meaning that, unlike in the standard, the failure ordering must be strictly weaker (rather than merely "not stronger") than the success ordering
  24. // => Use instead: (success == memory_order_acq_rel && failure == memory_order_acquire)
  25. // -----------------------------------------------------------------------------------------------------------------------------------------
  26. // -----------------------------------------------------------------------------------------------------------------------------------------
  27. // Why do we provide our own atomics
  28. //
  29. // * allows for platform specific modifications if need to be (had bugs as well as suboptimal performance in the past on some platforms)
  30. // * forbid lockbased implementation
  31. // * be able to operate on values that aren’t of type std/baselib::atomic<T>
  32. // * avoid indirections that do not always optimize out as expected
  33. // * can enforce avoiding use of libatomic
  34. // * control debug overhead (substantial with std::atomic on some platforms)
  35. // * striving for more consistent cross platform implementation by providing compiler intrinsic based implementations
  36. // * allows adding extensions
  37. // -----------------------------------------------------------------------------------------------------------------------------------------
  38. #include "../C/Baselib_Atomic.h"
  39. #include "Internal/TypeTraits.h"
  40. // Note that aligning by type is not possible with the C compatible COMPILER_ALIGN_AS as MSVC's own alignment attribute does not allow evaluation of sizeof
  41. #define ALIGN_ATOMIC(TYPE_) alignas(sizeof(TYPE_))
  42. #define ALIGNED_ATOMIC(TYPE_) ALIGN_ATOMIC(TYPE_) TYPE_
  43. namespace baselib
  44. {
  45. BASELIB_CPP_INTERFACE
  46. {
  47. enum memory_order_relaxed_t { memory_order_relaxed = 0 }; // Equal to std::memory_order_relaxed
  48. enum memory_order_acquire_t { memory_order_acquire = 2 }; // Equal to std::memory_order_acquire
  49. enum memory_order_release_t { memory_order_release = 3 }; // Equal to std::memory_order_release
  50. enum memory_order_acq_rel_t { memory_order_acq_rel = 4 }; // Equal to std::memory_order_acq_rel
  51. enum memory_order_seq_cst_t { memory_order_seq_cst = 5 }; // Equal to std::memory_order_seq_cst
  52. namespace detail
  53. {
  54. template<typename T, typename ... Rest>
  55. struct is_any : std::false_type {};
  56. template<typename T, typename First>
  57. struct is_any<T, First> : std::is_same<T, First> {};
  58. template<typename T, typename First, typename ... Rest>
  59. struct is_any<T, First, Rest...>
  60. : std::integral_constant<bool, std::is_same<T, First>::value || is_any<T, Rest...>::value>
  61. {};
  62. #define TEST_ATOMICS_PREREQUISITES(_TYPE) \
  63. static_assert(baselib::is_trivially_copyable<_TYPE>::value, "atomic operation operands needs to be trivially copyable"); \
  64. static_assert(sizeof(_TYPE) <= sizeof(void*) * 2, "atomic operation operands need to be smaller or equal than two pointers");
  65. template<typename T> static inline T fail();
  66. template<typename T, typename MemoryOrder, typename ... AllowedMemoryOrders> static inline T fail_prerequisites()
  67. {
  68. TEST_ATOMICS_PREREQUISITES(T);
  69. static_assert(is_any<MemoryOrder, AllowedMemoryOrders...>::value, "the specified memory ordering is invalid for this atomic operation");
  70. return fail<T>();
  71. }
  72. template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure> static inline T fail_prerequisites_cmpxchg()
  73. {
  74. TEST_ATOMICS_PREREQUISITES(T);
  75. // Special error message for success: release, fail: acquire
  76. static_assert(!(std::is_same<MemoryOrderSuccess, baselib::memory_order_release_t>::value && std::is_same<MemoryOrderFailure, baselib::memory_order_acquire_t>::value),
  77. "Unlike std::atomic, baselib does not allow compare_exchange with memory ordering release on success and acquire on failure. Use acq_rel for success instead.\n"
  78. "This restriction is in place to avoid confusion both by users and implementors on the semantics of such an operation which would not be allowed to do an acquire barrier on load "
  79. "but still implies a dedicated acquire fence if (and only if) the operation fails. "
  80. "Scenarios where the user expects acquire on load and release on write are best expressed with acq_rel for success and acquire on failure.");
  81. static_assert(
  82. // fail: relaxed, success: relaxed/acquire/release/seq_cst
  83. (std::is_same<MemoryOrderFailure, baselib::memory_order_relaxed_t>::value &&
  84. is_any<MemoryOrderSuccess, baselib::memory_order_relaxed_t, baselib::memory_order_acquire_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>::value) ||
  85. // fail: acquire, success acquire/release/seq_cst
  86. (std::is_same<MemoryOrderFailure, baselib::memory_order_relaxed_t>::value &&
  87. is_any<MemoryOrderSuccess, baselib::memory_order_acquire_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>::value) ||
  88. // fail: seq_cst, success: seq_cst
  89. (std::is_same<MemoryOrderSuccess, baselib::memory_order_seq_cst_t>::value && std::is_same<MemoryOrderFailure, baselib::memory_order_seq_cst_t>::value),
  90. "the specified combination of memory ordering is invalid for compare exchange operations");
  91. return fail<T>();
  92. }
  93. template<typename T, typename MemoryOrder> static inline T fail_prerequisites_alu()
  94. {
  95. static_assert(std::is_integral<T>::value, "operands of arithmetic atomic operations need to be integral");
  96. return fail_prerequisites<T, MemoryOrder,
  97. baselib::memory_order_relaxed_t,
  98. baselib::memory_order_acquire_t,
  99. baselib::memory_order_release_t,
  100. baselib::memory_order_acq_rel_t,
  101. baselib::memory_order_seq_cst_t>();
  102. }
  103. }
  104. // MACRO generated impl
  105. // re-directs to Baselib_atomic_ API
  106. // ----------------------------------------------------------------------------------------------------------------------------------
  107. #define detail_THREAD_FENCE(order, ...) \
  108. static FORCE_INLINE void atomic_thread_fence(memory_order_##order##_t order) \
  109. { \
  110. return Baselib_atomic_thread_fence_##order(); \
  111. }
  112. #define detail_LOAD(op, order, id, bits, ...) \
  113. template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
  114. static FORCE_INLINE T atomic_load_explicit(const T& obj, memory_order_##order##_t order) \
  115. { \
  116. T ret; \
  117. Baselib_atomic_load_##id##_##order##_v(&obj, &ret); \
  118. return ret; \
  119. }
  120. #define detail_LOAD128(op, order, id, bits, ...) \
  121. template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
  122. static FORCE_INLINE T atomic_load_explicit(const T& obj, memory_order_##order##_t order) \
  123. { \
  124. T ret; \
  125. Baselib_atomic_load_##id##_##order##_v(const_cast<T*>(&obj), &ret); \
  126. return ret; \
  127. }
  128. #define detail_STORE(op, order, id, bits, ...) \
  129. template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
  130. static FORCE_INLINE void atomic_store_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order)\
  131. { \
  132. return Baselib_atomic_store_##id##_##order##_v(&obj, &value); \
  133. }
  134. #define detail_LOAD_STORE(op, order, id, bits, ...) \
  135. template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
  136. static FORCE_INLINE T atomic_##op##_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order) \
  137. { \
  138. T ret; \
  139. Baselib_atomic_##op##_##id##_##order##_v(&obj, &value, &ret); \
  140. return ret; \
  141. }
  142. #define detail_ALU(op, order, id, bits, ...) \
  143. template<typename T, typename std::enable_if<baselib::is_integral_of_size<T, bits/8>::value, int>::type = 0> \
  144. static FORCE_INLINE T atomic_##op##_explicit(T& obj, typename std::common_type<T>::type value, memory_order_##order##_t order) \
  145. { \
  146. T ret; \
  147. Baselib_atomic_##op##_##id##_##order##_v(&obj, &value, &ret); \
  148. return ret; \
  149. }
  150. #define detail_CMP_XCHG(op, order1, order2, id, bits, ...) \
  151. template<typename T, typename std::enable_if<baselib::is_trivial_of_size<T, bits/8>::value, int>::type = 0> \
  152. static FORCE_INLINE bool atomic_##op##_explicit(T& obj, \
  153. typename std::common_type<T>::type& expected, \
  154. typename std::common_type<T>::type desired, \
  155. memory_order_##order1##_t order_success, \
  156. memory_order_##order2##_t order_failure) \
  157. { \
  158. return Baselib_atomic_##op##_##id##_##order1##_##order2##_v(&obj, &expected, &desired); \
  159. }
  160. #define detail_NOT_SUPPORTED(...)
  161. Baselib_Atomic_FOR_EACH_MEMORY_ORDER(
  162. detail_THREAD_FENCE
  163. )
  164. Baselib_Atomic_FOR_EACH_ATOMIC_OP_MEMORY_ORDER_AND_INT_TYPE(
  165. detail_LOAD, // load
  166. detail_STORE, // store
  167. detail_ALU, // add
  168. detail_ALU, // and
  169. detail_ALU, // or
  170. detail_ALU, // xor
  171. detail_LOAD_STORE, // exchange
  172. detail_CMP_XCHG, // compare_exchange_weak
  173. detail_CMP_XCHG // compare_exchange_strong
  174. )
  175. #if PLATFORM_ARCH_64
  176. // 128bit atomics
  177. Baselib_Atomic_FOR_EACH_ATOMIC_OP_AND_MEMORY_ORDER(
  178. detail_LOAD128, // load
  179. detail_STORE, // store
  180. detail_NOT_SUPPORTED, // add
  181. detail_NOT_SUPPORTED, // and
  182. detail_NOT_SUPPORTED, // or
  183. detail_NOT_SUPPORTED, // xor
  184. detail_LOAD_STORE, // exchange
  185. detail_CMP_XCHG, // compare_exchange_weak
  186. detail_CMP_XCHG, // compare_exchange_strong
  187. 128, 128)
  188. #endif
  189. #undef detail_THREAD_FENCE
  190. #undef detail_LOAD128
  191. #undef detail_LOAD
  192. #undef detail_STORE
  193. #undef detail_LOAD_STORE
  194. #undef detail_ALU
  195. #undef detail_CMP_XCHG
  196. #undef detail_NOT_SUPPORTED
  197. template<typename T, typename MemoryOrder>
  198. static FORCE_INLINE T atomic_fetch_sub_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  199. {
  200. return baselib::atomic_fetch_add_explicit(obj, 0 - value, order);
  201. }
  202. // API documentation and default fallback for non-matching types
  203. // ----------------------------------------------------------------------------------------------------------------------
  204. template<typename T, typename MemoryOrder>
  205. static FORCE_INLINE T atomic_load_explicit(const T& obj, MemoryOrder order)
  206. {
  207. return detail::fail_prerequisites<T, MemoryOrder, baselib::memory_order_relaxed_t, baselib::memory_order_acquire_t, baselib::memory_order_seq_cst_t>();
  208. }
  209. template<typename T, typename MemoryOrder>
  210. static FORCE_INLINE void atomic_store_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  211. {
  212. detail::fail_prerequisites<T, MemoryOrder, baselib::memory_order_relaxed_t, baselib::memory_order_release_t, baselib::memory_order_seq_cst_t>();
  213. }
  214. template<typename T, typename MemoryOrder>
  215. static FORCE_INLINE T atomic_fetch_add_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  216. {
  217. return detail::fail_prerequisites_alu<T, MemoryOrder>();
  218. }
  219. template<typename T, typename MemoryOrder>
  220. static FORCE_INLINE T atomic_fetch_and_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  221. {
  222. return detail::fail_prerequisites_alu<T, MemoryOrder>();
  223. }
  224. template<typename T, typename MemoryOrder>
  225. static FORCE_INLINE T atomic_fetch_or_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  226. {
  227. return detail::fail_prerequisites_alu<T, MemoryOrder>();
  228. }
  229. template<typename T, typename MemoryOrder>
  230. static FORCE_INLINE T atomic_fetch_xor_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  231. {
  232. return detail::fail_prerequisites_alu<T, MemoryOrder>();
  233. }
  234. template<typename T, typename MemoryOrder>
  235. static FORCE_INLINE T atomic_exchange_explicit(T& obj, typename std::common_type<T>::type value, MemoryOrder order)
  236. {
  237. return detail::fail_prerequisites<T, MemoryOrder>();
  238. }
  239. template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure>
  240. static FORCE_INLINE bool atomic_compare_exchange_weak_explicit(T& obj,
  241. typename std::common_type<T>::type& expected,
  242. typename std::common_type<T>::type desired,
  243. MemoryOrderSuccess order_success,
  244. MemoryOrderFailure order_failure)
  245. {
  246. detail::fail_prerequisites_cmpxchg<T, MemoryOrderSuccess, MemoryOrderFailure>();
  247. return false;
  248. }
  249. template<typename T, typename MemoryOrderSuccess, typename MemoryOrderFailure>
  250. static FORCE_INLINE bool atomic_compare_exchange_strong_explicit(T& obj,
  251. typename std::common_type<T>::type& expected,
  252. typename std::common_type<T>::type desired,
  253. MemoryOrderSuccess order_success,
  254. MemoryOrderFailure order_failure)
  255. {
  256. detail::fail_prerequisites_cmpxchg<T, MemoryOrderSuccess, MemoryOrderFailure>();
  257. return false;
  258. }
  259. // default memory order (memory_order_seq_cst)
  260. // ----------------------------------------------------------------------------------------------------------------------
  261. template<typename T>
  262. static FORCE_INLINE T atomic_load(const T& obj)
  263. {
  264. return baselib::atomic_load_explicit(obj, memory_order_seq_cst);
  265. }
  266. template<typename T>
  267. static FORCE_INLINE void atomic_store(T& obj, typename std::common_type<T>::type value)
  268. {
  269. return baselib::atomic_store_explicit(obj, value, memory_order_seq_cst);
  270. }
  271. template<typename T>
  272. static FORCE_INLINE T atomic_fetch_add(T& obj, typename std::common_type<T>::type value)
  273. {
  274. return baselib::atomic_fetch_add_explicit(obj, value, memory_order_seq_cst);
  275. }
  276. template<typename T>
  277. static FORCE_INLINE T atomic_fetch_sub(T& obj, typename std::common_type<T>::type value)
  278. {
  279. return baselib::atomic_fetch_sub_explicit(obj, value, memory_order_seq_cst);
  280. }
  281. template<typename T>
  282. static FORCE_INLINE T atomic_fetch_and(T& obj, typename std::common_type<T>::type value)
  283. {
  284. return baselib::atomic_fetch_and_explicit(obj, value, memory_order_seq_cst);
  285. }
  286. template<typename T>
  287. static FORCE_INLINE T atomic_fetch_or(T& obj, typename std::common_type<T>::type value)
  288. {
  289. return baselib::atomic_fetch_or_explicit(obj, value, memory_order_seq_cst);
  290. }
  291. template<typename T>
  292. static FORCE_INLINE T atomic_fetch_xor(T& obj, typename std::common_type<T>::type value)
  293. {
  294. return baselib::atomic_fetch_xor_explicit(obj, value, memory_order_seq_cst);
  295. }
  296. template<typename T>
  297. static FORCE_INLINE T atomic_exchange(T& obj, typename std::common_type<T>::type value)
  298. {
  299. return baselib::atomic_exchange_explicit(obj, value, memory_order_seq_cst);
  300. }
  301. template<typename T>
  302. static FORCE_INLINE bool atomic_compare_exchange_weak(T& obj,
  303. typename std::common_type<T>::type& expected,
  304. typename std::common_type<T>::type desired)
  305. {
  306. return baselib::atomic_compare_exchange_weak_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
  307. }
  308. template<typename T>
  309. static FORCE_INLINE bool atomic_compare_exchange_strong(T& obj,
  310. typename std::common_type<T>::type& expected,
  311. typename std::common_type<T>::type desired)
  312. {
  313. return baselib::atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
  314. }
  315. template<typename T>
  316. struct atomic_common
  317. {
  318. using value_type = T;
  319. TEST_ATOMICS_PREREQUISITES(T);
  320. ALIGNED_ATOMIC(T) obj;
  321. FORCE_INLINE atomic_common() = default;
  322. // Initializes atomic with a given value. Initialization is not atomic!
  323. FORCE_INLINE atomic_common(T value)
  324. {
  325. obj = value;
  326. }
  327. FORCE_INLINE operator T() const { return baselib::atomic_load_explicit(obj, memory_order_seq_cst); }
  328. FORCE_INLINE T operator=(T value) { baselib::atomic_store_explicit(obj, value, memory_order_seq_cst); return value; }
  329. template<typename TMemoryOrder = memory_order_seq_cst_t>
  330. FORCE_INLINE T load(TMemoryOrder order = memory_order_seq_cst) const
  331. {
  332. return baselib::atomic_load_explicit(obj, order);
  333. }
  334. template<typename TMemoryOrder = memory_order_seq_cst_t>
  335. FORCE_INLINE void store(T value, TMemoryOrder order = memory_order_seq_cst)
  336. {
  337. return baselib::atomic_store_explicit(obj, value, order);
  338. }
  339. template<typename TMemoryOrder = memory_order_seq_cst_t>
  340. FORCE_INLINE T exchange(T value, TMemoryOrder order = memory_order_seq_cst)
  341. {
  342. return baselib::atomic_exchange_explicit(obj, value, order);
  343. }
  344. template<typename TMemoryOrderSuccess, typename TMemoryOrderFailure>
  345. FORCE_INLINE bool compare_exchange_weak(T& expected, T desired, TMemoryOrderSuccess order_success, TMemoryOrderFailure order_failure)
  346. {
  347. return baselib::atomic_compare_exchange_weak_explicit(obj, expected, desired, order_success, order_failure);
  348. }
  349. FORCE_INLINE bool compare_exchange_weak(T& expected, T desired)
  350. {
  351. return baselib::atomic_compare_exchange_weak_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
  352. }
  353. template<typename TMemoryOrderSuccess, typename TMemoryOrderFailure>
  354. FORCE_INLINE bool compare_exchange_strong(T& expected, T desired, TMemoryOrderSuccess order_success, TMemoryOrderFailure order_failure)
  355. {
  356. return baselib::atomic_compare_exchange_strong_explicit(obj, expected, desired, order_success, order_failure);
  357. }
  358. FORCE_INLINE bool compare_exchange_strong(T& expected, T desired)
  359. {
  360. return baselib::atomic_compare_exchange_strong_explicit(obj, expected, desired, memory_order_seq_cst, memory_order_seq_cst);
  361. }
  362. };
  363. template<typename T, bool IsIntegral>
  364. struct atomic_base {};
  365. // Atomic type for integral types.
  366. template<typename T>
  367. struct atomic_base<T, true> : atomic_common<T>
  368. {
  369. using atomic_common<T>::atomic_common;
  370. template<typename TMemoryOrder = memory_order_seq_cst_t>
  371. FORCE_INLINE T fetch_add(T value, TMemoryOrder order = memory_order_seq_cst)
  372. {
  373. return baselib::atomic_fetch_add_explicit(atomic_common<T>::obj, value, order);
  374. }
  375. template<typename TMemoryOrder = memory_order_seq_cst_t>
  376. FORCE_INLINE T fetch_sub(T value, TMemoryOrder order = memory_order_seq_cst)
  377. {
  378. return baselib::atomic_fetch_sub_explicit(atomic_common<T>::obj, value, order);
  379. }
  380. template<typename TMemoryOrder = memory_order_seq_cst_t>
  381. FORCE_INLINE T fetch_and(T value, TMemoryOrder order = memory_order_seq_cst)
  382. {
  383. return baselib::atomic_fetch_and_explicit(atomic_common<T>::obj, value, order);
  384. }
  385. template<typename TMemoryOrder = memory_order_seq_cst_t>
  386. FORCE_INLINE T fetch_or(T value, TMemoryOrder order = memory_order_seq_cst)
  387. {
  388. return baselib::atomic_fetch_or_explicit(atomic_common<T>::obj, value, order);
  389. }
  390. template<typename TMemoryOrder = memory_order_seq_cst_t>
  391. FORCE_INLINE T fetch_xor(T value, TMemoryOrder order = memory_order_seq_cst)
  392. {
  393. return baselib::atomic_fetch_xor_explicit(atomic_common<T>::obj, value, order);
  394. }
  395. FORCE_INLINE T operator++(int) { return baselib::atomic_fetch_add_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst); }
  396. FORCE_INLINE T operator--(int) { return baselib::atomic_fetch_sub_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst); }
  397. FORCE_INLINE T operator++() { return baselib::atomic_fetch_add_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst) + T(1); }
  398. FORCE_INLINE T operator--() { return baselib::atomic_fetch_sub_explicit(atomic_common<T>::obj, T(1), memory_order_seq_cst) - T(1); }
  399. FORCE_INLINE T operator+=(T value) { return baselib::atomic_fetch_add_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) + value; }
  400. FORCE_INLINE T operator-=(T value) { return baselib::atomic_fetch_sub_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) - value; }
  401. FORCE_INLINE T operator&=(T value) { return baselib::atomic_fetch_and_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) & value; }
  402. FORCE_INLINE T operator|=(T value) { return baselib::atomic_fetch_or_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) | value; }
  403. FORCE_INLINE T operator^=(T value) { return baselib::atomic_fetch_xor_explicit(atomic_common<T>::obj, value, memory_order_seq_cst) ^ value; }
  404. };
  405. // Atomic type for non-integral types.
  406. template<typename T>
  407. struct atomic_base<T, false> : atomic_common<T>
  408. {
  409. using atomic_common<T>::atomic_common;
  410. };
  411. template<typename T>
  412. struct atomic : atomic_base<T, std::is_integral<T>::value>
  413. {
  414. using atomic_base<T, std::is_integral<T>::value>::atomic_base;
  415. };
  416. #undef TEST_ATOMICS_PREREQUISITES
  417. }
  418. }