// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <stdbool.h>
#include <stdint.h>
#include <zircon/compiler.h>

__BEGIN_CDECLS

// strongly ordered versions of the atomic routines as implemented
// by the compiler with arch-dependent memory barriers.
static inline int atomic_swap(volatile int* ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_add(volatile int* ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_and(volatile int* ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_or(volatile int* ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_xor(volatile int* ptr, int val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

static inline bool atomic_cmpxchg(volatile int* ptr, int* oldval, int newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
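
// Usage sketch (hypothetical |target| counter): on failure atomic_cmpxchg()
// writes the value it observed back through |oldval|, so a compare-and-swap
// retry loop can look like:
//
//   volatile int target = 0;
//   int old = atomic_load(&target);
//   while (!atomic_cmpxchg(&target, &old, old + 1)) {
//       // |old| now holds the value seen in |target|; the loop recomputes old + 1.
//   }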

static inline int atomic_load(volatile int* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline void atomic_store(volatile int* ptr, int newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_SEQ_CST);
}

// relaxed versions of the above
static inline int atomic_swap_relaxed(volatile int* ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_add_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_xor_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED);
}

static inline bool atomic_cmpxchg_relaxed(volatile int* ptr, int* oldval, int newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static inline int atomic_load_relaxed(volatile int* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

static inline void atomic_store_relaxed(volatile int* ptr, int newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_RELAXED);
}
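
// Usage sketch (hypothetical |stat_hits| counter): the relaxed variants
// guarantee atomicity only and impose no ordering on surrounding memory
// accesses, which is sufficient for e.g. a statistics counter that never
// publishes other data:
//
//   static volatile int stat_hits;
//   atomic_add_relaxed(&stat_hits, 1);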

static inline int atomic_add_release(volatile int* ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELEASE);
}

static inline void atomic_fence(void) {
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
}

static inline void atomic_fence_acquire(void) {
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
}
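
// Usage sketch (hypothetical |flag| published with a release store): an
// acquire fence can upgrade a preceding relaxed load, ordering later reads
// of the published data after the flag check:
//
//   if (atomic_load_relaxed(&flag)) {
//       atomic_fence_acquire();
//       // safe to read data published before |flag| was set
//   }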

static inline uint32_t atomic_load_u32(volatile uint32_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline void atomic_store_relaxed_u32(volatile uint32_t* ptr, uint32_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_RELAXED);
}

// 64-bit versions. Assumes the compiler/platform uses an LP64/LLP64 data
// model, so int is 32 bits.
static inline int64_t atomic_swap_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_add_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_and_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_or_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_xor_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

static inline bool atomic_cmpxchg_64(volatile int64_t* ptr, int64_t* oldval,
                                     int64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_load_64(volatile int64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_load_64_relaxed(volatile int64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

static inline void atomic_store_64(volatile int64_t* ptr, int64_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_swap_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_add_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_and_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_or_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_xor_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

static inline bool atomic_cmpxchg_u64(volatile uint64_t* ptr, uint64_t* oldval,
                                      uint64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_load_u64(volatile uint64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_load_u64_relaxed(volatile uint64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

static inline void atomic_store_u64(volatile uint64_t* ptr, uint64_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_SEQ_CST);
}
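
// Usage sketch (hypothetical |next_id| generator): atomic_add_u64() returns
// the value held in |*ptr| before the addition, so each caller gets a
// distinct, monotonically increasing id:
//
//   static volatile uint64_t next_id;
//   uint64_t id = atomic_add_u64(&next_id, 1);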

static inline void atomic_store_u64_relaxed(volatile uint64_t* ptr, uint64_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_RELAXED);
}

static inline void atomic_signal_fence(void) {
    __atomic_signal_fence(__ATOMIC_SEQ_CST);
}

static inline int64_t atomic_add_64_relaxed(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline uint64_t atomic_add_u64_relaxed(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline bool atomic_cmpxchg_64_relaxed(volatile int64_t* ptr, int64_t* oldval,
                                             int64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static inline bool atomic_cmpxchg_u64_relaxed(volatile uint64_t* ptr, uint64_t* oldval,
                                              uint64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

__END_CDECLS