/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#if defined __thumb__ && !defined __thumb2__
#include_next <common/bits/atomic.h>
#else
#include <stdint.h>
#include <sysdep.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

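/* Deliberately never defined: the stubs below for operand sizes this
   port cannot handle call it, so any such use fails at link time
   instead of being silently miscompiled.  */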
void __arm_link_error (void);

/* Use the atomic builtins provided by GCC when the backend provides
   a pattern to expand them efficiently.  */

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
#define atomic_full_barrier() __sync_synchronize ()
#elif defined __thumb2__
#define atomic_full_barrier() \
     __asm__ __volatile__						      \
	     ("movw\tip, #0x0fa0\n\t"					      \
	      "movt\tip, #0xffff\n\t"					      \
	      "blx\tip"							      \
	      : : : "ip", "lr", "cc", "memory");
#else
#define atomic_full_barrier() \
     __asm__ __volatile__						      \
	     ("mov\tip, #0xffff0fff\n\t"				      \
	      "mov\tlr, pc\n\t"						      \
	      "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)"			      \
	      : : : "ip", "lr", "cc", "memory");
#endif
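
/* Both asm variants above branch to the kernel's memory-barrier user
   helper at 0xffff0fa0 in the vector page.  A minimal usage sketch
   follows, disabled because this is a header; PUBLISH and its
   variables are invented for the example and are not part of this
   file.  */
#if 0
static int data;
static volatile int ready;

static void
publish (int value)
{
  data = value;
  atomic_full_barrier ();	/* Make DATA visible before READY.  */
  ready = 1;
}
#endif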

/* Atomic compare and exchange.  This sequence relies on the kernel to
   provide a compare and exchange operation which is atomic on the
   current architecture, either via cleverness on pre-ARMv6 or via
   ldrex / strex on ARMv6.  */
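
/* Both asm variants below reach that operation through the kernel's
   cmpxchg user helper at 0xffff0fc0 in the vector page.  Its contract
   (documented with the helpers in the kernel's
   arch/arm/kernel/entry-armv.S) is roughly the C signature sketched
   here; the typedef name is ours, not something the kernel exports.
   On success the helper returns zero with the C flag set; on failure
   it returns non-zero with C clear, which is what the "bcc" retry
   branches below test.  */
#if 0
typedef int (*__kuser_cmpxchg_t) (int oldval, int newval,
				  volatile int *ptr);
#define __kuser_cmpxchg ((__kuser_cmpxchg_t) 0xffff0fc0)
#endif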

#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap ((mem), (oldval), (newval))

/* It doesn't matter what register is used for a_oldval2, but we must
   specify one to work around GCC PR rtl-optimization/21223.  Otherwise
   it may cause a_oldval or a_tmp to be moved to a different register.  */

#elif defined __thumb2__
/* Thumb-2 has ldrex/strex.  However, it does not have barrier
   instructions, so we still need to use the kernel helper.  */
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ register __typeof (oldval) a_oldval __asm__ ("r0");		      \
     register __typeof (oldval) a_newval __asm__ ("r1") = (newval);	      \
     register __typeof (mem) a_ptr __asm__ ("r2") = (mem);		      \
     register __typeof (oldval) a_tmp __asm__ ("r3");			      \
     register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval);	      \
     __asm__ __volatile__						      \
	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \
	      "cmp\t%[tmp], %[old2]\n\t"				      \
	      "bne\t1f\n\t"						      \
	      "mov\t%[old], %[old2]\n\t"				      \
	      "movw\t%[tmp], #0x0fc0\n\t"				      \
	      "movt\t%[tmp], #0xffff\n\t"				      \
	      "blx\t%[tmp]\n\t"						      \
	      "bcc\t0b\n\t"						      \
	      "mov\t%[tmp], %[old2]\n\t"				      \
	      "1:"							      \
	      : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)		      \
	      : [new] "r" (a_newval), [ptr] "r" (a_ptr),		      \
		[old2] "r" (a_oldval2)					      \
	      : "ip", "lr", "cc", "memory");				      \
     a_tmp; })
#else
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ register __typeof (oldval) a_oldval __asm__ ("r0");		      \
     register __typeof (oldval) a_newval __asm__ ("r1") = (newval);	      \
     register __typeof (mem) a_ptr __asm__ ("r2") = (mem);		      \
     register __typeof (oldval) a_tmp __asm__ ("r3");			      \
     register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval);	      \
     __asm__ __volatile__						      \
	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \
	      "cmp\t%[tmp], %[old2]\n\t"				      \
	      "bne\t1f\n\t"						      \
	      "mov\t%[old], %[old2]\n\t"				      \
	      "mov\t%[tmp], #0xffff0fff\n\t"				      \
	      "mov\tlr, pc\n\t"						      \
	      "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t"		      \
	      "bcc\t0b\n\t"						      \
	      "mov\t%[tmp], %[old2]\n\t"				      \
	      "1:"							      \
	      : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)		      \
	      : [new] "r" (a_newval), [ptr] "r" (a_ptr),		      \
		[old2] "r" (a_oldval2)					      \
	      : "ip", "lr", "cc", "memory");				      \
     a_tmp; })
#endif

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })
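
/* glibc's generic atomic.h builds its public operations on these
   size-suffixed macros.  As an illustrative sketch only (the function
   name is invented and nothing in it is part of this file's API), a
   fetch-and-add retry loop over the 32-bit variant looks like:  */
#if 0
static int
example_fetch_and_add (int *mem, int inc)
{
  int old, seen;
  do
    {
      old = *mem;
      /* The macro returns the value it saw at *MEM: equal to OLD on
	 success, the conflicting value otherwise.  */
      seen = __arch_compare_and_exchange_val_32_acq (mem, old + inc, old);
    }
  while (seen != old);
  return old;
}
#endif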

#endif