// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <fbl/type_support.h>

#include <stddef.h>
#include <stdint.h>

// fbl::atomic<T> provides typesafe C++ atomics on integral types,
// enums, and pointers (including function pointers). It does not
// support:
// - wide characters
// - memory_order_consume
// - member function pointers

// The interface closely matches the underlying builtins and the
// standard C and C++ interfaces. Member function and nonmember
// function versions of operations are provided. No operator overloads
// for e.g. += are provided. Aggregate initialization syntax is not
// supported, as fbl::atomic<T> is not an aggregate.

// Only the compare-exchange overloads that require both memory orders
// explicitly are provided. The rules around what values to use for
// the success and failure cases in the single-order overload are
// subtle. Similarly, we don't provide _explicit and non-_explicit
// variants of operations, as the std:: versions do.

// This file also provides nonmember functions that operate on
// fbl::atomic<T>:
//   - atomic_init value-initializes a default-constructed atomic<T>
//   - atomic_OP are nonmember versions of atomic<T>::OP

// It also provides barriers that are not tied to a particular memory
// location:
//   - atomic_thread_fence issues a memory barrier
//   - atomic_signal_fence issues a compiler barrier

// In addition, fbl::atomic does not provide the same compatibility
// guarantees with C11's <stdatomic.h> as std::atomic does
// (std::atomic is designed to allow interop with C by #define
// _Atomic(T) std::atomic<T> and so on). The types are not guaranteed
// to be ABI or operationally compatible, and no effort is made to
// make any of the fbl::atomic functions extern "C".

// However, fbl::atomic<T> _is_ guaranteed to have the same size and
// alignment as T, and to be standard layout.

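// As an informal illustration (not a definitive part of this header's API),
// a minimal counter built on fbl::atomic might look like the sketch below;
// the |PendingWork| type and its member names are made up for the example.
//
//     struct PendingWork {
//         void Finish() {
//             // Relaxed is sufficient if the count is only read for statistics.
//             completed_.fetch_add(1u, fbl::memory_order_relaxed);
//         }
//         uint32_t completed() const {
//             return completed_.load(fbl::memory_order_relaxed);
//         }
//     private:
//         fbl::atomic<uint32_t> completed_{0u};
//     };
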
namespace fbl {

// The underlying builtins specify the memory order parameters as an
// int, so let's be explicit here.
enum memory_order : int {
    memory_order_relaxed = __ATOMIC_RELAXED,
    memory_order_acquire = __ATOMIC_ACQUIRE,
    memory_order_release = __ATOMIC_RELEASE,
    memory_order_acq_rel = __ATOMIC_ACQ_REL,
    memory_order_seq_cst = __ATOMIC_SEQ_CST,
};

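// As a hedged illustration of choosing orders (assumptions: |data| is a
// plain int, |ready| is an fbl::atomic<bool>, and these are the only shared
// state): a writer can publish with a release store and a reader can observe
// with an acquire load.
//
//     // Writer:
//     data = 42;                                     // plain store
//     ready.store(true, fbl::memory_order_release);  // publish
//
//     // Reader:
//     if (ready.load(fbl::memory_order_acquire)) {
//         // |data| is guaranteed to be visible as 42 here.
//     }
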
template <typename T>
struct atomic {
    static_assert(is_integral<T>::value || is_enum<T>::value,
                  "fbl::atomic only supports integral, enum, and pointer types");
    static_assert(!is_same<T, wchar_t>::value, "fbl::atomic does not support wide characters");
    static_assert(!is_same<T, char16_t>::value, "fbl::atomic does not support wide characters");
    static_assert(!is_same<T, char32_t>::value, "fbl::atomic does not support wide characters");
    static_assert(__atomic_always_lock_free(sizeof(T), nullptr),
                  "The requested integer size is not statically guaranteed to be atomically modifiable");

    // The default constructor does not initialize the value! This is
    // the same as plain old integer types.
    atomic() = default;
    constexpr atomic(T value)
        : value_(value) {}

    // Don't copy, move, or operator= atomic values. Use store instead
    // of operator=.
    atomic(const atomic& value) = delete;
    atomic(atomic&& value) = delete;
    void operator=(atomic value) = delete;
    void operator=(atomic value) volatile = delete;
    atomic& operator=(const atomic& value) = delete;
    atomic& operator=(const atomic& value) volatile = delete;
    atomic& operator=(atomic&& value) = delete;
    atomic& operator=(atomic&& value) volatile = delete;

    void store(T value, memory_order order = memory_order_seq_cst) {
        __atomic_store_n(&value_, value, order);
    }
    void store(T value, memory_order order = memory_order_seq_cst) volatile {
        __atomic_store_n(&value_, value, order);
    }

    T load(memory_order order = memory_order_seq_cst) const {
        return __atomic_load_n(&value_, order);
    }
    T load(memory_order order = memory_order_seq_cst) const volatile {
        return __atomic_load_n(&value_, order);
    }

    T exchange(T value, memory_order order = memory_order_seq_cst) {
        return __atomic_exchange_n(&value_, value, order);
    }
    T exchange(T value, memory_order order = memory_order_seq_cst) volatile {
        return __atomic_exchange_n(&value_, value, order);
    }

    // Note that the std:: versions take a mutable _reference_ to
    // expected, not a pointer. In addition, std:: provides overloads like
    //    compare_exchange_weak(T* expected, T desired,
    //                          memory_order order = memory_order_seq_cst);
    // which are rather magic in that the release orders imply
    // different success and failure orders, which feels nonobvious
    // enough that we don't provide them.
    bool compare_exchange_weak(T* expected, T desired,
                               memory_order success_order,
                               memory_order failure_order) {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ true,
                                           success_order, failure_order);
    }
    bool compare_exchange_weak(T* expected, T desired,
                               memory_order success_order,
                               memory_order failure_order) volatile {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ true,
                                           success_order, failure_order);
    }

    bool compare_exchange_strong(T* expected, T desired,
                                 memory_order success_order,
                                 memory_order failure_order) {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ false,
                                           success_order, failure_order);
    }
    bool compare_exchange_strong(T* expected, T desired,
                                 memory_order success_order,
                                 memory_order failure_order) volatile {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ false,
                                           success_order, failure_order);
    }

    T fetch_add(T value, memory_order order = memory_order_seq_cst) {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_add(&value_, value, order);
    }
    T fetch_add(T value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_add(&value_, value, order);
    }

    T fetch_sub(T value, memory_order order = memory_order_seq_cst) {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_sub(&value_, value, order);
    }
    T fetch_sub(T value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_sub(&value_, value, order);
    }

    T fetch_and(T value, memory_order order = memory_order_seq_cst) {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_and(&value_, value, order);
    }
    T fetch_and(T value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_and(&value_, value, order);
    }

    T fetch_or(T value, memory_order order = memory_order_seq_cst) {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_or(&value_, value, order);
    }
    T fetch_or(T value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_or(&value_, value, order);
    }

    T fetch_xor(T value, memory_order order = memory_order_seq_cst) {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_xor(&value_, value, order);
    }
    T fetch_xor(T value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!fbl::is_same<T, bool>::value, "no arithmetic on atomic<bool>!");
        return __atomic_fetch_xor(&value_, value, order);
    }

private:
    template <typename U>
    friend void atomic_init(atomic<U>* atomic_ptr, U value);
    template <typename U>
    friend void atomic_init(volatile atomic<U>* atomic_ptr, U value);

    T value_;
};

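// As a hedged illustration of the compare-exchange members above (not part
// of this header), a typical read-modify-write retry loop looks like the
// sketch below; |SaturatingIncrement| is a made-up helper.
//
//     bool SaturatingIncrement(fbl::atomic<uint32_t>* counter) {
//         uint32_t current = counter->load(fbl::memory_order_relaxed);
//         do {
//             if (current == UINT32_MAX)
//                 return false; // Already saturated.
//         } while (!counter->compare_exchange_weak(&current, current + 1u,
//                                                  fbl::memory_order_relaxed,
//                                                  fbl::memory_order_relaxed));
//         return true;
//     }
//
// On failure, compare_exchange_weak writes the currently stored value back
// through |expected|, so each iteration retries with a fresh |current|.
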
// A partial specialization of the struct for pointer types. This is
// identical to the integral version with the following exceptions.
// - There are no |fetch_and|, |fetch_or|, or |fetch_xor| operations.
// - |fetch_add| and |fetch_sub| are in terms of ptrdiff_t, just like
//   arithmetic on an ordinary pointer.
template <typename T>
struct atomic<T*> {
    // The default constructor does not initialize the value! This is
    // the same as plain old pointer types.
    atomic() = default;
    constexpr atomic(T* value)
        : value_(value) {}

    // Don't copy, move, or operator= atomic values. Use store instead
    // of operator=.
    atomic(const atomic& value) = delete;
    atomic(atomic&& value) = delete;
    void operator=(atomic value) = delete;
    void operator=(atomic value) volatile = delete;
    atomic& operator=(const atomic& value) = delete;
    atomic& operator=(const atomic& value) volatile = delete;
    atomic& operator=(atomic&& value) = delete;
    atomic& operator=(atomic&& value) volatile = delete;

    void store(T* value, memory_order order = memory_order_seq_cst) {
        __atomic_store_n(&value_, value, order);
    }
    void store(T* value, memory_order order = memory_order_seq_cst) volatile {
        __atomic_store_n(&value_, value, order);
    }

    T* load(memory_order order = memory_order_seq_cst) const {
        return __atomic_load_n(&value_, order);
    }
    T* load(memory_order order = memory_order_seq_cst) const volatile {
        return __atomic_load_n(&value_, order);
    }

    T* exchange(T* value, memory_order order = memory_order_seq_cst) {
        return __atomic_exchange_n(&value_, value, order);
    }
    T* exchange(T* value, memory_order order = memory_order_seq_cst) volatile {
        return __atomic_exchange_n(&value_, value, order);
    }

    // Note that the std:: versions take a mutable _reference_ to
    // expected, not a pointer. In addition, std:: provides overloads like
    //    compare_exchange_weak(T** expected, T* desired,
    //                          memory_order order = memory_order_seq_cst);
    // which are rather magic in that the release orders imply
    // different success and failure orders, which feels nonobvious
    // enough that we don't provide them.
    bool compare_exchange_weak(T** expected, T* desired,
                               memory_order success_order,
                               memory_order failure_order) {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ true,
                                           success_order, failure_order);
    }
    bool compare_exchange_weak(T** expected, T* desired,
                               memory_order success_order,
                               memory_order failure_order) volatile {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ true,
                                           success_order, failure_order);
    }

    bool compare_exchange_strong(T** expected, T* desired,
                                 memory_order success_order,
                                 memory_order failure_order) {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ false,
                                           success_order, failure_order);
    }
    bool compare_exchange_strong(T** expected, T* desired,
                                 memory_order success_order,
                                 memory_order failure_order) volatile {
        return __atomic_compare_exchange_n(&value_, expected, desired, /* weak */ false,
                                           success_order, failure_order);
    }

    T* fetch_add(ptrdiff_t value, memory_order order = memory_order_seq_cst) {
        static_assert(!is_same<T, void>::value, "Cannot perform arithmetic on pointer to void");
        return __atomic_fetch_add(&value_, value * sizeof(T), order);
    }
    T* fetch_add(ptrdiff_t value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!is_same<T, void>::value, "Cannot perform arithmetic on pointer to void");
        return __atomic_fetch_add(&value_, value * sizeof(T), order);
    }

    T* fetch_sub(ptrdiff_t value, memory_order order = memory_order_seq_cst) {
        static_assert(!is_same<T, void>::value, "Cannot perform arithmetic on pointer to void");
        return __atomic_fetch_sub(&value_, value * sizeof(T), order);
    }
    T* fetch_sub(ptrdiff_t value, memory_order order = memory_order_seq_cst) volatile {
        static_assert(!is_same<T, void>::value, "Cannot perform arithmetic on pointer to void");
        return __atomic_fetch_sub(&value_, value * sizeof(T), order);
    }

private:
    template <typename U>
    friend void atomic_init(atomic<U>* atomic_ptr, U value);
    template <typename U>
    friend void atomic_init(volatile atomic<U>* atomic_ptr, U value);

    T* value_;
};

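// As a hedged illustration of the pointer specialization (not part of this
// header): |fetch_add| and |fetch_sub| count in elements, not bytes, so a
// made-up producer handing out slots from a fixed buffer might look like:
//
//     int slots[16];
//     fbl::atomic<int*> next(slots);
//     int* mine = next.fetch_add(1, fbl::memory_order_relaxed); // claim one slot
//
// Each successful fetch_add advances the shared cursor by sizeof(int) bytes
// (one element) and returns the previous pointer value.
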
// Non-member function versions.
template <typename T>
void atomic_store(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    atomic_ptr->store(value, order);
}
template <typename T>
void atomic_store(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    atomic_ptr->store(value, order);
}

template <typename T>
T atomic_load(const atomic<T>* atomic_ptr, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->load(order);
}
template <typename T>
T atomic_load(const volatile atomic<T>* atomic_ptr, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->load(order);
}

template <typename T>
T atomic_exchange(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->exchange(value, order);
}
template <typename T>
T atomic_exchange(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->exchange(value, order);
}

template <typename T>
bool atomic_compare_exchange_weak(atomic<T>* atomic_ptr, T* expected, T desired,
                                  memory_order success_order,
                                  memory_order failure_order) {
    return atomic_ptr->compare_exchange_weak(expected, desired, success_order, failure_order);
}
template <typename T>
bool atomic_compare_exchange_weak(volatile atomic<T>* atomic_ptr, T* expected, T desired,
                                  memory_order success_order,
                                  memory_order failure_order) {
    return atomic_ptr->compare_exchange_weak(expected, desired, success_order, failure_order);
}

template <typename T>
bool atomic_compare_exchange_strong(atomic<T>* atomic_ptr, T* expected, T desired,
                                    memory_order success_order,
                                    memory_order failure_order) {
    return atomic_ptr->compare_exchange_strong(expected, desired, success_order, failure_order);
}

template <typename T>
bool atomic_compare_exchange_strong(volatile atomic<T>* atomic_ptr, T* expected, T desired,
                                    memory_order success_order,
                                    memory_order failure_order) {
    return atomic_ptr->compare_exchange_strong(expected, desired, success_order, failure_order);
}

template <typename T>
typename enable_if<!is_pointer<T>::value, T>::type
atomic_fetch_add(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_add(value, order);
}
template <typename T>
typename enable_if<!is_pointer<T>::value, T>::type
atomic_fetch_add(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_add(value, order);
}
template <typename T>
T* atomic_fetch_add(atomic<T*>* atomic_ptr, ptrdiff_t value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_add(value, order);
}
template <typename T>
T* atomic_fetch_add(volatile atomic<T*>* atomic_ptr, ptrdiff_t value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_add(value, order);
}

template <typename T>
typename enable_if<!is_pointer<T>::value, T>::type
atomic_fetch_sub(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_sub(value, order);
}
template <typename T>
typename enable_if<!is_pointer<T>::value, T>::type
atomic_fetch_sub(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_sub(value, order);
}
template <typename T>
T* atomic_fetch_sub(atomic<T*>* atomic_ptr, ptrdiff_t value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_sub(value, order);
}
template <typename T>
T* atomic_fetch_sub(volatile atomic<T*>* atomic_ptr, ptrdiff_t value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_sub(value, order);
}

template <typename T>
T atomic_fetch_and(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_and(value, order);
}
template <typename T>
T atomic_fetch_and(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_and(value, order);
}

template <typename T>
T atomic_fetch_or(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_or(value, order);
}
template <typename T>
T atomic_fetch_or(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_or(value, order);
}

template <typename T>
T atomic_fetch_xor(atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_xor(value, order);
}
template <typename T>
T atomic_fetch_xor(volatile atomic<T>* atomic_ptr, T value, memory_order order = memory_order_seq_cst) {
    return atomic_ptr->fetch_xor(value, order);
}

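// As a hedged illustration (not part of this header), the nonmember forms
// mirror the members, so the following two lines are equivalent for some
// made-up |flags| of type fbl::atomic<uint32_t>:
//
//     const uint32_t kDirtyBit = 0x1u;
//     flags.fetch_or(kDirtyBit, fbl::memory_order_acq_rel);                // member form
//     fbl::atomic_fetch_or(&flags, kDirtyBit, fbl::memory_order_acq_rel);  // nonmember form
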
// Other atomic functions.

// atomic_init value-initializes an uninitialized atomic<T>. Only
// default-constructed atomic<T>s are uninitialized.
//
// This function is _not_ atomic: any other concurrent access (even if
// that access is atomic) is a data race.
//
// This function is _not_ a substitute for constructing the atomic<T>:
// it does not begin the lifetime of an atomic<T> object.
//
// Using the value constructors is preferable to using this
// function. This function exists because calling the value
// constructor is occasionally more awkward than separating
// construction from initialization.
template <typename T>
void atomic_init(atomic<T>* atomic_ptr, T value) {
    atomic_ptr->value_ = value;
}
template <typename T>
void atomic_init(volatile atomic<T>* atomic_ptr, T value) {
    atomic_ptr->value_ = value;
}

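// As a hedged illustration (not part of this header), atomic_init fits the
// pattern where construction and initialization are separated, e.g. a
// made-up global that is filled in during early, single-threaded startup:
//
//     fbl::atomic<uint32_t> g_cpu_count; // default constructed: uninitialized
//
//     void InitEarly(uint32_t count) {
//         // Not atomic; must run before any other thread can touch g_cpu_count.
//         fbl::atomic_init(&g_cpu_count, count);
//     }
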
// atomic_thread_fence issues a memory barrier according to the given
// memory order, which synchronizes with other atomic operations and
// with other atomic_thread_fences.
//
// For instance, suppose there is a call to
// atomic_thread_fence(memory_order_acquire). No memory access (read
// or write) that appears after the fence can be reordered before any
// read that appears before the fence.
//
// Because the barrier applies to all memory rather than a particular
// location, it is a stronger guarantee for the given memory order
// than the corresponding atomic operation on a single memory
// location, and may be more expensive.
inline void atomic_thread_fence(memory_order order = memory_order_seq_cst) {
    __atomic_thread_fence(order);
}

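// As a hedged illustration (not part of this header), a release fence can
// pair with an acquire fence to publish plain data through a relaxed flag;
// |data| (a plain int) and |ready| (an fbl::atomic<bool>) are made-up
// shared variables:
//
//     // Writer:
//     data = 42;
//     fbl::atomic_thread_fence(fbl::memory_order_release);
//     ready.store(true, fbl::memory_order_relaxed);
//
//     // Reader:
//     if (ready.load(fbl::memory_order_relaxed)) {
//         fbl::atomic_thread_fence(fbl::memory_order_acquire);
//         // |data| is now guaranteed to read as 42.
//     }
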
// atomic_signal_fence issues a compiler barrier. No compiler
// reorderings that violate the memory guarantees of the given memory
// order may be performed.
//
// For instance, suppose there is a call to
// atomic_signal_fence(memory_order_release). The compiler may not
// move any memory access (read or write) that appears before the
// fence past any write that appears after the fence.
//
// The name of this function comes from signal handlers, which often
// need to prevent compiler reordering but do not need a hardware
// memory barrier.
inline void atomic_signal_fence(memory_order order = memory_order_seq_cst) {
    __atomic_signal_fence(order);
}

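// As a hedged illustration (not part of this header), atomic_signal_fence
// orders accesses relative to a signal or interrupt handler running on the
// same thread; |g_request|, |g_pending|, and |BuildRequest| are made up,
// and |g_pending| would typically itself be an atomic or volatile flag:
//
//     g_request = BuildRequest();                           // plain writes
//     fbl::atomic_signal_fence(fbl::memory_order_release);  // compiler barrier only
//     g_pending.store(true, fbl::memory_order_relaxed);     // handler may now read g_request
//
// No hardware barrier instruction is emitted; only compiler reordering is
// constrained.
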
// Aliases for all integer type names.
using atomic_char = atomic<char>;
using atomic_schar = atomic<signed char>;
using atomic_uchar = atomic<unsigned char>;
using atomic_short = atomic<short>;
using atomic_ushort = atomic<unsigned short>;
using atomic_int = atomic<int>;
using atomic_uint = atomic<unsigned int>;
using atomic_long = atomic<long>;
using atomic_ulong = atomic<unsigned long>;
using atomic_llong = atomic<long long>;
using atomic_ullong = atomic<unsigned long long>;

using atomic_intptr_t = atomic<intptr_t>;
using atomic_uintptr_t = atomic<uintptr_t>;
using atomic_size_t = atomic<size_t>;
using atomic_ptrdiff_t = atomic<ptrdiff_t>;
using atomic_intmax_t = atomic<intmax_t>;
using atomic_uintmax_t = atomic<uintmax_t>;

using atomic_int8_t = atomic<int8_t>;
using atomic_uint8_t = atomic<uint8_t>;
using atomic_int16_t = atomic<int16_t>;
using atomic_uint16_t = atomic<uint16_t>;
using atomic_int32_t = atomic<int32_t>;
using atomic_uint32_t = atomic<uint32_t>;
using atomic_int64_t = atomic<int64_t>;
using atomic_uint64_t = atomic<uint64_t>;

using atomic_int_least8_t = atomic<int_least8_t>;
using atomic_uint_least8_t = atomic<uint_least8_t>;
using atomic_int_least16_t = atomic<int_least16_t>;
using atomic_uint_least16_t = atomic<uint_least16_t>;
using atomic_int_least32_t = atomic<int_least32_t>;
using atomic_uint_least32_t = atomic<uint_least32_t>;
using atomic_int_least64_t = atomic<int_least64_t>;
using atomic_uint_least64_t = atomic<uint_least64_t>;
using atomic_int_fast8_t = atomic<int_fast8_t>;
using atomic_uint_fast8_t = atomic<uint_fast8_t>;
using atomic_int_fast16_t = atomic<int_fast16_t>;
using atomic_uint_fast16_t = atomic<uint_fast16_t>;
using atomic_int_fast32_t = atomic<int_fast32_t>;
using atomic_uint_fast32_t = atomic<uint_fast32_t>;
using atomic_int_fast64_t = atomic<int_fast64_t>;
using atomic_uint_fast64_t = atomic<uint_fast64_t>;

using atomic_bool = atomic<bool>;

} // namespace fbl