/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)	      \
({									      \
  unsigned int __tmp;							      \
  __asm__ __volatile__ (						      \
		    "1:	lwarx	%0,0,%1\n"				      \
		    "	subf.	%0,%2,%0\n"				      \
		    "	bne	2f\n"					      \
		    "	stwcx.	%3,0,%1\n"				      \
		    "	bne-	1b\n"					      \
		    "2:	" __ARCH_ACQ_INSTR				      \
		    : "=&r" (__tmp)					      \
		    : "b" (mem), "r" (oldval), "r" (newval)		      \
		    : "cr0", "memory");					      \
  __tmp != 0;								      \
})
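
/* Illustrative sketch of a typical caller: the boolean form above returns
   zero when the exchange succeeded and nonzero when *MEM did not contain
   OLDVAL, so callers normally retry in a loop.  The identifiers
   `__counter' and `__add_one' below are hypothetical and only show the
   calling pattern.

     static unsigned int __counter;

     static void
     __add_one (void)
     {
       unsigned int __old, __new;
       do
	 {
	   __old = __counter;
	   __new = __old + 1;
	 }
       while (__arch_compare_and_exchange_bool_32_acq (&__counter,
							__new, __old));
     }
*/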

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)	      \
({									      \
  unsigned int __tmp;							      \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"				      \
		    "1:	lwarx	%0,0,%1\n"				      \
		    "	subf.	%0,%2,%0\n"				      \
		    "	bne	2f\n"					      \
		    "	stwcx.	%3,0,%1\n"				      \
		    "	bne-	1b\n"					      \
		    "2:	"						      \
		    : "=&r" (__tmp)					      \
		    : "b" (mem), "r" (oldval), "r" (newval)		      \
		    : "cr0", "memory");					      \
  __tmp != 0;								      \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
    ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
    ({ abort (); (*mem)--; })
#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync),
 * so if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier()	__asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR	"lwsync"
# endif
#else

/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier()	__asm__ ("sync" ::: "memory")
#endif

#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

#ifdef UP
# define __ARCH_ACQ_INSTR	""
# define __ARCH_REL_INSTR	""
#else
# define __ARCH_ACQ_INSTR	"isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR	"sync"
# endif
#endif

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier()	__asm__ ("sync" ::: "memory")
#define atomic_write_barrier()	__asm__ ("eieio" ::: "memory")
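
/* Illustrative sketch of how the write barrier above (eieio) pairs with
   atomic_read_barrier when one thread publishes data through a flag.
   The identifiers `__data', `__ready', `__producer' and `__consumer' are
   hypothetical.

     static int __data;
     static volatile int __ready;

     static void
     __producer (void)
     {
       __data = 42;		   // Store the payload ...
       atomic_write_barrier ();	   // ... and order it before the flag.
       __ready = 1;
     }

     static int
     __consumer (void)
     {
       while (__ready == 0)
	 ;			   // Spin until the flag is observed set.
       atomic_read_barrier ();	   // Order the flag read before the data read.
       return __data;
     }
*/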

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)	      \
  ({									      \
      __typeof (*(mem)) __tmp;						      \
      __typeof (mem)  __memp = (mem);					      \
      __asm__ __volatile__ (						      \
		        "1:	lwarx	%0,0,%1\n"			      \
		        "	cmpw	%0,%2\n"			      \
		        "	bne	2f\n"				      \
		        "	stwcx.	%3,0,%1\n"			      \
		        "	bne-	1b\n"				      \
		        "2:	" __ARCH_ACQ_INSTR			      \
		        : "=&r" (__tmp)					      \
		        : "b" (__memp), "r" (oldval), "r" (newval)	      \
		        : "cr0", "memory");				      \
      __tmp;								      \
  })

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval)	      \
  ({									      \
      __typeof (*(mem)) __tmp;						      \
      __typeof (mem)  __memp = (mem);					      \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"			      \
		        "1:	lwarx	%0,0,%1\n"			      \
		        "	cmpw	%0,%2\n"			      \
		        "	bne	2f\n"				      \
		        "	stwcx.	%3,0,%1\n"			      \
		        "	bne-	1b\n"				      \
		        "2:	"					      \
		        : "=&r" (__tmp)					      \
		        : "b" (__memp), "r" (oldval), "r" (newval)	      \
		        : "cr0", "memory");				      \
      __tmp;								      \
  })

#define __arch_atomic_exchange_32_acq(mem, value)			      \
  ({									      \
    __typeof (*mem) __val;						      \
    __asm__ __volatile__ (						      \
		      "1:	lwarx	%0,0,%2\n"			      \
		      "		stwcx.	%3,0,%2\n"			      \
		      "		bne-	1b\n"				      \
		      "   " __ARCH_ACQ_INSTR				      \
		      : "=&r" (__val), "=m" (*mem)			      \
		      : "b" (mem), "r" (value), "m" (*mem)		      \
		      : "cr0", "memory");				      \
    __val;								      \
  })

#define __arch_atomic_exchange_32_rel(mem, value) \
  ({									      \
    __typeof (*mem) __val;						      \
    __asm__ __volatile__ (__ARCH_REL_INSTR "\n"				      \
		      "1:	lwarx	%0,0,%2\n"			      \
		      "		stwcx.	%3,0,%2\n"			      \
		      "		bne-	1b"				      \
		      : "=&r" (__val), "=m" (*mem)			      \
		      : "b" (mem), "r" (value), "m" (*mem)		      \
		      : "cr0", "memory");				      \
    __val;								      \
  })

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({									      \
    __typeof (*mem) __val, __tmp;					      \
    __asm__ __volatile__ ("1:	lwarx	%0,0,%3\n"			      \
		      "		add	%1,%0,%4\n"			      \
		      "		stwcx.	%1,0,%3\n"			      \
		      "		bne-	1b"				      \
		      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
		      : "b" (mem), "r" (value), "m" (*mem)		      \
		      : "cr0", "memory");				      \
    __val;								      \
  })

#define __arch_atomic_increment_val_32(mem) \
  ({									      \
    __typeof (*(mem)) __val;						      \
    __asm__ __volatile__ ("1:	lwarx	%0,0,%2\n"			      \
		      "		addi	%0,%0,1\n"			      \
		      "		stwcx.	%0,0,%2\n"			      \
		      "		bne-	1b"				      \
		      : "=&b" (__val), "=m" (*mem)			      \
		      : "b" (mem), "m" (*mem)				      \
		      : "cr0", "memory");				      \
    __val;								      \
  })

#define __arch_atomic_decrement_val_32(mem) \
  ({									      \
    __typeof (*(mem)) __val;						      \
    __asm__ __volatile__ ("1:	lwarx	%0,0,%2\n"			      \
		      "		subi	%0,%0,1\n"			      \
		      "		stwcx.	%0,0,%2\n"			      \
		      "		bne-	1b"				      \
		      : "=&b" (__val), "=m" (*mem)			      \
		      : "b" (mem), "m" (*mem)				      \
		      : "cr0", "memory");				      \
    __val;								      \
  })

#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ int __val, __tmp;							      \
     __asm__ __volatile__ ("1:	lwarx	%0,0,%3\n"			      \
		       "	cmpwi	0,%0,0\n"			      \
		       "	addi	%1,%0,-1\n"			      \
		       "	ble	2f\n"				      \
		       "	stwcx.	%1,0,%3\n"			      \
		       "	bne-	1b\n"				      \
		       "2:	" __ARCH_ACQ_INSTR			      \
		       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
		       : "b" (mem), "m" (*mem)				      \
		       : "cr0", "memory");				      \
     __val;								      \
  })

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*mem) == 4)						      \
      __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \
    else if (sizeof (*mem) == 8)					      \
      __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \
    else								      \
       abort ();							      \
    __result;								      \
  })
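
/* Illustrative sketch of using the value-returning form: it hands back the
   previous contents of *MEM, so the caller knows the swap took effect when
   the returned value equals the expected one.  The identifiers `__maximum'
   and `__store_max' are hypothetical.

     static int __maximum;

     static void
     __store_max (int __candidate)
     {
       int __seen, __prev = __maximum;
       while (__candidate > __prev)
	 {
	   __seen = atomic_compare_and_exchange_val_acq (&__maximum,
							 __candidate, __prev);
	   if (__seen == __prev)
	     break;		// The exchange succeeded.
	   __prev = __seen;	// Another thread updated it; re-check.
	 }
     }
*/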

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*mem) == 4)						      \
      __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \
    else if (sizeof (*mem) == 8)					      \
      __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \
    else								      \
       abort ();							      \
    __result;								      \
  })

#define atomic_exchange_acq(mem, value) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*mem) == 4)						      \
      __result = __arch_atomic_exchange_32_acq (mem, value);		      \
    else if (sizeof (*mem) == 8)					      \
      __result = __arch_atomic_exchange_64_acq (mem, value);		      \
    else								      \
       abort ();							      \
    __result;								      \
  })

#define atomic_exchange_rel(mem, value) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*mem) == 4)						      \
      __result = __arch_atomic_exchange_32_rel (mem, value);		      \
    else if (sizeof (*mem) == 8)					      \
      __result = __arch_atomic_exchange_64_rel (mem, value);		      \
    else								      \
       abort ();							      \
    __result;								      \
  })
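
/* Illustrative sketch of a minimal test-and-set spin lock built from the
   exchange forms above: the acquire variant orders the critical section
   after the lock is taken, and the release variant orders it before the
   lock is dropped.  The identifiers `__lock_word', `__spin_lock' and
   `__spin_unlock' are hypothetical.

     static int __lock_word;	// 0 = free, 1 = held.

     static void
     __spin_lock (void)
     {
       while (atomic_exchange_acq (&__lock_word, 1) != 0)
	 ;			// Busy-wait until the old value was 0.
     }

     static void
     __spin_unlock (void)
     {
       atomic_exchange_rel (&__lock_word, 0);
     }
*/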

#define atomic_exchange_and_add(mem, value) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*mem) == 4)						      \
      __result = __arch_atomic_exchange_and_add_32 (mem, value);	      \
    else if (sizeof (*mem) == 8)					      \
      __result = __arch_atomic_exchange_and_add_64 (mem, value);	      \
    else								      \
       abort ();							      \
    __result;								      \
  })
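
/* Illustrative sketch: atomic_exchange_and_add returns the value *MEM held
   before the addition, which makes it a fetch-and-add suitable for handing
   out unique, monotonically increasing values.  The identifiers
   `__next_ticket' and `__take_ticket' are hypothetical.

     static unsigned int __next_ticket;

     static unsigned int
     __take_ticket (void)
     {
       // Each caller receives a distinct ticket number.
       return atomic_exchange_and_add (&__next_ticket, 1);
     }
*/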

#define atomic_increment_val(mem) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*(mem)) == 4)						      \
      __result = __arch_atomic_increment_val_32 (mem);			      \
    else if (sizeof (*(mem)) == 8)					      \
      __result = __arch_atomic_increment_val_64 (mem);			      \
    else								      \
       abort ();							      \
    __result;								      \
  })

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
  ({									      \
    __typeof (*(mem)) __result;						      \
    if (sizeof (*(mem)) == 4)						      \
      __result = __arch_atomic_decrement_val_32 (mem);			      \
    else if (sizeof (*(mem)) == 8)					      \
      __result = __arch_atomic_decrement_val_64 (mem);			      \
    else								      \
       abort ();							      \
    __result;								      \
  })

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })


/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __result;					      \
    if (sizeof (*mem) == 4)						      \
      __result = __arch_atomic_decrement_if_positive_32 (mem);		      \
    else if (sizeof (*mem) == 8)					      \
      __result = __arch_atomic_decrement_if_positive_64 (mem);		      \
    else								      \
       abort ();							      \
    __result;								      \
  })
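
/* Illustrative sketch: because the decrement only happens when the old
   value was greater than zero, the returned old value tells the caller
   whether a unit was actually taken, as in a non-blocking semaphore wait.
   The identifiers `__available' and `__try_take' are hypothetical.

     static int __available;

     static int
     __try_take (void)
     {
       // Returns nonzero if a unit was consumed, zero otherwise.
       return atomic_decrement_if_positive (&__available) > 0;
     }
*/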