1 /*
2 * Copyright (c) 2016 - 2020, Nordic Semiconductor ASA
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its
16 * contributors may be used to endorse or promote products derived from this
17 * software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef NRFX_ATOMIC_INTERNAL_H__
33 #define NRFX_ATOMIC_INTERNAL_H__
34
35 #include <nrfx.h>
36
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40
/* LDREX/STREX instructions are supported only on Cortex-M3 and newer cores
 * (and on Cortex-SC300). */
42 #if ((__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)) == 0
43 #error "Unsupported core version"
44 #endif
45
46 #if defined ( __CC_ARM )
/* Atomically stores @p value in *p_ptr.
 * Writes the stored (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_mov(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    /* The base standard specifies that arguments are passed in the core registers
     * r0-r3 and on the stack. Registers r4 and r5 must be saved on the stack.
     * Only even number of register pushes are allowed. This is a requirement
     * of the Procedure Call Standard for the ARM Architecture [AAPCS].
     */
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_mov
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    mov   r5, r1              /* r5 = new value to store */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_mov            /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
70
71
/* Atomically performs *p_ptr |= value.
 * Writes the resulting (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_orr(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_orr
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    orr   r5, r0, r1          /* r5 = old | value */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_orr            /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
90
/* Atomically performs *p_ptr &= value.
 * Writes the resulting (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_and(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_and
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    and   r5, r0, r1          /* r5 = old & value */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_and            /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
109
/* Atomically performs *p_ptr ^= value.
 * Writes the resulting (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_eor(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_eor
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    eor   r5, r0, r1          /* r5 = old ^ value */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_eor            /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
128
/* Atomically performs *p_ptr += value (wraps modulo 2^32).
 * Writes the resulting (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_add(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_add
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    add   r5, r0, r1          /* r5 = old + value */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_add            /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
147
/* Atomically performs *p_ptr -= value (wraps modulo 2^32).
 * Writes the resulting (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_sub(nrfx_atomic_u32_t * p_ptr,
                                               uint32_t value,
                                               uint32_t * p_new)
{
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_sub
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    sub   r5, r0, r1          /* r5 = old - value */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_sub            /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
166
/* Atomic compare-and-exchange:
 *   if (*p_data == *p_expected) { *p_data = value;      return true;  }
 *   else                        { *p_expected = *p_data; return false; }
 * Retries until the exclusive store succeeds.
 */
static __asm bool nrfx_atomic_internal_cmp_exch(nrfx_atomic_u32_t * p_data,
                                                uint32_t * p_expected,
                                                uint32_t value)
{
/* Register aliases; r4-r6 are callee-saved per AAPCS, hence the push/pop. */
#define RET_REG  r0
#define P_EXPC   r1
#define VALUE    r2
#define STR_RES  r3
#define P_DATA   r4
#define EXPC_VAL r5
#define ACT_VAL  r6

    push {r4-r6}
    mov  P_DATA, r0                    /* keep p_data; r0 becomes the result */
    mov  RET_REG, #0                   /* default result: false */

loop_cmp_exch
    ldrex   ACT_VAL, [P_DATA]          /* exclusively read the current value */
    ldr     EXPC_VAL, [P_EXPC]
    cmp     ACT_VAL, EXPC_VAL
    ittee   eq
    strexeq STR_RES, VALUE, [P_DATA]   /* equal: try to store the new value */
    moveq   RET_REG, #1
    strexne STR_RES, ACT_VAL, [P_DATA] /* not equal: store back the same value
                                        * to release the exclusive monitor */
    strne   ACT_VAL, [P_EXPC]          /* report the actual value to the caller */
    cmp     STR_RES, #0
    itt     ne
    movne   RET_REG, #0                /* exclusive store failed - retry */
    bne     loop_cmp_exch

    pop {r4-r6}
    bx  lr

#undef RET_REG
#undef P_EXPC
#undef VALUE
#undef STR_RES
#undef P_DATA
#undef EXPC_VAL
#undef ACT_VAL
}
208
/* Atomically performs a saturating-at-zero style subtraction:
 *   *p_ptr -= value, but only if *p_ptr >= value (unsigned comparison);
 * otherwise *p_ptr is left unchanged.
 * Writes the resulting (new) value to *p_new and returns the previous value
 * of *p_ptr. Retries the LDREX/STREX sequence until the store succeeds.
 */
static __asm uint32_t nrfx_atomic_internal_sub_hs(nrfx_atomic_u32_t * p_ptr,
                                                  uint32_t value,
                                                  uint32_t * p_new)
{
    push {r4, r5}
    mov  r4, r0               /* r4 = p_ptr; r0 is reused for the return value */

loop_sub_ge
    ldrex r0, [r4]            /* r0 = old value (returned to the caller) */
    cmp   r0, r1              /* old >= value (unsigned)? */
    ite   hs
    subhs r5, r0, r1          /* yes: r5 = old - value */
    movlo r5, r0              /* no:  keep the old value */
    strex r3, r5, [r4]        /* try exclusive store; r3 == 0 on success */
    cmp   r3, #0
    bne   loop_sub_ge         /* lost exclusivity - retry */

    str  r5, [r2]             /* *p_new = value actually stored */
    pop  {r4, r5}
    bx   lr
}
230
231
/* Dispatches to one of the nrfx_atomic_internal_* assembler routines above.
 * old_val receives the value before the operation, new_val the value after it. */
#define NRFX_ATOMIC_OP(asm_op, old_val, new_val, ptr, value) \
    old_val = nrfx_atomic_internal_##asm_op(ptr, value, &new_val)
234
235 #elif defined ( __ICCARM__ ) || defined ( __GNUC__ )
236
/**
 * @brief Atomic operation generic macro.
 *
 * Expands to an LDREX/STREX retry loop that applies the operation selected by
 * the corresponding NRFX_ATOMIC_OP_##asm_op snippet defined below.
 *
 * @param[in]  asm_op  Operation: mov, orr, and, eor, add, sub.
 * @param[out] old_val Atomic object output (uint32_t); value before operation.
 * @param[out] new_val Atomic object output (uint32_t); value after operation.
 * @param[in]  ptr     Pointer to the atomic object (nrfx_atomic_u32_t *).
 * @param[in]  value   Atomic operation operand.
 */
#define NRFX_ATOMIC_OP(asm_op, old_val, new_val, ptr, value)          \
{                                                                     \
    uint32_t tmp_reg;                                                 \
    __ASM volatile(                                                   \
    "1:     ldrex %["#old_val"], [%["#ptr"]]\n"                       \
    NRFX_ATOMIC_OP_##asm_op(new_val, old_val, value)                  \
    "       strex %[tmp_reg], %["#new_val"], [%["#ptr"]]\n"           \
    "       teq   %[tmp_reg], #0\n"                                   \
    "       bne.n 1b"                                                 \
            :                                                         \
        [old_val] "=&r" (old_val),                                    \
        [new_val] "=&r" (new_val),                                    \
        [tmp_reg] "=&r" (tmp_reg)                                     \
            :                                                         \
        [ptr]   "r" (ptr),                                            \
        [value] "r" (value)                                           \
            : "cc");                                                  \
    (void)tmp_reg;                                                    \
}
264
/* Per-operation instruction snippets spliced into NRFX_ATOMIC_OP() between the
 * ldrex/strex pair; each computes new_val from old_val and value. */
#define NRFX_ATOMIC_OP_mov(new_val, old_val, value) "mov %["#new_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_orr(new_val, old_val, value) "orr %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_and(new_val, old_val, value) "and %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_eor(new_val, old_val, value) "eor %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_add(new_val, old_val, value) "add %["#new_val"], %["#old_val"], %["#value"]\n"
#define NRFX_ATOMIC_OP_sub(new_val, old_val, value) "sub %["#new_val"], %["#old_val"], %["#value"]\n"
/* Conditional subtraction: new_val = (old_val >= value) ? old_val - value
 * : old_val (unsigned comparison; hs/lo condition codes). */
#define NRFX_ATOMIC_OP_sub_hs(new_val, old_val, value) \
    "cmp %["#old_val"], %["#value"]\n " \
    "ite hs\n" \
    "subhs %["#new_val"], %["#old_val"], %["#value"]\n" \
    "movlo %["#new_val"], %["#old_val"]\n"
276
/* Atomic compare-and-exchange:
 *   if (*p_data == *p_expected) { *p_data = value;       return true;  }
 *   else                        { *p_expected = *p_data; return false; }
 * Implemented as an LDREX/STREX retry loop; the strexne in the not-equal path
 * stores the unchanged value back to release the exclusive monitor.
 */
static inline bool nrfx_atomic_internal_cmp_exch(nrfx_atomic_u32_t * p_data,
                                                 uint32_t * p_expected,
                                                 uint32_t value)
{
    bool res = false;
    /* Temporary register used in the inline asm code for getting the result
     * of the strex* operations (no need to initialize it).
     */
    uint32_t tmp_reg;
    uint32_t act_val = 0;
    uint32_t exp_val = 0;
    __ASM volatile(
    "1:     ldrex   %[act_val], [%[ptr]]\n"
    "       ldr     %[exp_val], [%[expc]]\n"
    "       cmp     %[act_val], %[exp_val]\n"
    "       ittee   eq\n"
    "       strexeq %[tmp_reg], %[value], [%[ptr]]\n"
    "       moveq   %[res], #1\n"
    "       strexne %[tmp_reg], %[act_val], [%[ptr]]\n"
    "       strne   %[act_val], [%[expc]]\n"
    "       cmp     %[tmp_reg], #0\n"
    "       itt     ne\n"
    "       movne   %[res], #0\n"
    "       bne.n   1b"
            :
        [res]     "=&r" (res),
        [exp_val] "=&r" (exp_val),
        [act_val] "=&r" (act_val),
        [tmp_reg] "=&r" (tmp_reg)
            :
                  "0" (res),
                  "1" (exp_val),
                  "2" (act_val),
        [expc]    "r"  (p_expected),
        [ptr]     "r"  (p_data),
        [value]   "r"  (value)
            : "cc");
    (void)tmp_reg;
    return res;
}
317
318 #else
319 #error "Unsupported compiler"
320 #endif
321
322 #ifdef __cplusplus
323 }
324 #endif
325
326 #endif // NRFX_ATOMIC_INTERNAL_H__
327