1 /* 64bit Linux-specific atomic operations for ARM EABI.
2    Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
3    Based on linux-atomic.c
4 
5    64 bit additions david.gilbert@linaro.org
6 
7 This file is part of GCC.
8 
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13 
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
17 for more details.
18 
19 Under Section 7 of GPL version 3, you are granted additional
20 permissions described in the GCC Runtime Library Exception, version
21 3.1, as published by the Free Software Foundation.
22 
23 You should have received a copy of the GNU General Public License and
24 a copy of the GCC Runtime Library Exception along with this program;
25 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
26 <http://www.gnu.org/licenses/>.  */
27 
28 /* 64bit helper functions for atomic operations; the compiler will
29    call these when the code is compiled for a CPU without ldrexd/strexd.
30    (If the CPU had those then the compiler inlines the operation).
31 
   These helpers require a kernel helper that's only present on newer
   kernels; we check for that in an init section and bail out rather
   unceremoniously.  */
35 
#ifndef IS_L4
/* Minimal prototypes so this file needs no libc headers; __write is the
   same low-level entry point generic-morestack.c relies on (see the
   comment in __check_for_sync8_kernelhelper below).  */
extern unsigned int __write (int fd, const void *buf, unsigned int count);
extern void abort (void);
#endif

/* Kernel helper for compare-and-exchange.  It lives at a fixed address
   inside the kernel-provided helper page.  Judging from the retry loops
   below, it returns 0 on a successful exchange and nonzero otherwise
   — NOTE(review): confirm against the kernel's user-helpers ABI doc.  */
typedef int (__kernel_cmpxchg64_t) (const long long* oldval,
					const long long* newval,
					long long *ptr);
#define __kernel_cmpxchg64 (*(__kernel_cmpxchg64_t *) 0xffff0f60)

/* Kernel helper page version number.  */
#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
49 
50 /* Check that the kernel has a new enough version at load.  */
__check_for_sync8_kernelhelper(void)51 static void __check_for_sync8_kernelhelper (void)
52 {
53 #ifndef IS_L4
54   if (__kernel_helper_version < 5)
55     {
56       const char err[] = "A newer kernel is required to run this binary. "
57 				"(__kernel_cmpxchg64 helper)\n";
58       /* At this point we need a way to crash with some information
59 	 for the user - I'm not sure I can rely on much else being
60 	 available at this point, so do the same as generic-morestack.c
61 	 write () and abort ().  */
62       __write (2 /* stderr.  */, err, sizeof (err));
63       abort ();
64     }
65 #endif
66 };
67 
/* Arrange for the kernel-helper version check to run before main() by
   planting a function pointer in .init_array; "used" stops the linker
   or compiler from discarding the otherwise-unreferenced array.  */
static void (*__sync8_kernelhelper_inithook[]) (void)
		__attribute__ ((used, section (".init_array"))) = {
  &__check_for_sync8_kernelhelper
};
72 
73 #define HIDDEN __attribute__ ((visibility ("hidden")))
74 
/* Expand to one out-of-line __sync_fetch_and_<OP>_8 entry point, which
   returns the value *PTR held BEFORE the operation.  PFX_OP is a prefix
   operator (empty, or ~ for nand) and INF_OP the infix operator.  The
   plain read of *PTR may race with other writers; the __kernel_cmpxchg64
   call only commits tmp2 if *PTR still equals tmp, so the loop simply
   retries until no intervening write occurred.  */
#define FETCH_AND_OP_WORD64(OP, PFX_OP, INF_OP)			\
  long long HIDDEN						\
  __sync_fetch_and_##OP##_8 (long long *ptr, long long val)	\
  {								\
    int failure;						\
    long long tmp,tmp2;						\
								\
    do {							\
      tmp = *ptr;						\
      tmp2 = PFX_OP (tmp INF_OP val);				\
      failure = __kernel_cmpxchg64 (&tmp, &tmp2, ptr);		\
    } while (failure != 0);					\
								\
    return tmp;							\
  }

FETCH_AND_OP_WORD64 (add,   , +)
FETCH_AND_OP_WORD64 (sub,   , -)
FETCH_AND_OP_WORD64 (or,    , |)
FETCH_AND_OP_WORD64 (and,   , &)
FETCH_AND_OP_WORD64 (xor,   , ^)
FETCH_AND_OP_WORD64 (nand, ~, &)
97 
/* Naming helpers carried over from linux-atomic.c.
   NOTE(review): neither macro appears to be used anywhere in this file —
   likely leftovers from the subword implementation this was based on.  */
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

/* Expand to one out-of-line __sync_<OP>_and_fetch_8 entry point for
   64-bit quantities.  Same retry loop as FETCH_AND_OP_WORD64, but
   returns the newly computed value (tmp2) instead of the previous
   contents of *PTR.  */
#define OP_AND_FETCH_WORD64(OP, PFX_OP, INF_OP)			\
  long long HIDDEN						\
  __sync_##OP##_and_fetch_8 (long long *ptr, long long val)	\
  {								\
    int failure;						\
    long long tmp,tmp2;						\
								\
    do {							\
      tmp = *ptr;						\
      tmp2 = PFX_OP (tmp INF_OP val);				\
      failure = __kernel_cmpxchg64 (&tmp, &tmp2, ptr);		\
    } while (failure != 0);					\
								\
    return tmp2;						\
  }

OP_AND_FETCH_WORD64 (add,   , +)
OP_AND_FETCH_WORD64 (sub,   , -)
OP_AND_FETCH_WORD64 (or,    , |)
OP_AND_FETCH_WORD64 (and,   , &)
OP_AND_FETCH_WORD64 (xor,   , ^)
OP_AND_FETCH_WORD64 (nand, ~, &)
126 
127 long long HIDDEN
__sync_val_compare_and_swap_8(long long * ptr,long long oldval,long long newval)128 __sync_val_compare_and_swap_8 (long long *ptr, long long oldval,
129 				long long newval)
130 {
131   int failure;
132   long long actual_oldval;
133 
134   while (1)
135     {
136       actual_oldval = *ptr;
137 
138       if (__builtin_expect (oldval != actual_oldval, 0))
139 	return actual_oldval;
140 
141       failure = __kernel_cmpxchg64 (&actual_oldval, &newval, ptr);
142 
143       if (__builtin_expect (!failure, 1))
144 	return oldval;
145     }
146 }
147 
148 typedef unsigned char bool;
149 
150 bool HIDDEN
__sync_bool_compare_and_swap_8(long long * ptr,long long oldval,long long newval)151 __sync_bool_compare_and_swap_8 (long long *ptr, long long oldval,
152 				 long long newval)
153 {
154   int failure = __kernel_cmpxchg64 (&oldval, &newval, ptr);
155   return (failure == 0);
156 }
157 
158 long long HIDDEN
__sync_lock_test_and_set_8(long long * ptr,long long val)159 __sync_lock_test_and_set_8 (long long *ptr, long long val)
160 {
161   int failure;
162   long long oldval;
163 
164   do {
165     oldval = *ptr;
166     failure = __kernel_cmpxchg64 (&oldval, &val, ptr);
167   } while (failure != 0);
168 
169   return oldval;
170 }
171