/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008,
   2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
#include <bits/kernel-features.h>

#if defined(__UCLIBC_USE_TIME64__)
#include "internal/time64_helpers.h"
#endif

#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256

#define FUTEX_BITSET_MATCH_ANY 0xffffffff

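/* Illustrative sketch only (not part of this header): the constants above
   are operation codes for the Linux futex system call.  Waiting until a
   futex word stops holding an expected value, and waking one waiter on it,
   look roughly as follows when issued through syscall(2); `word' and
   `expected_value' are assumed only for this example.

     int word;   // hypothetical process-private futex word

     syscall (__NR_futex, &word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
              expected_value, NULL, NULL, 0);
     syscall (__NR_futex, &word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
              1, NULL, NULL, 0);

   The lll_futex_* macros below issue calls of this kind through
   INTERNAL_SYSCALL.  */
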
/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE 0
#define LLL_SHARED FUTEX_PRIVATE_FLAG


#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
# define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
# define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
# define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
# define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private) \
   ? ((private) == 0 \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
      : (fl)) \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif

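/* Worked example (assuming __ASSUME_PRIVATE_FUTEX and code outside
   libc.so/ld.so): the (((fl) | FUTEX_PRIVATE_FLAG) ^ (private)) form above
   is what reverses the seemingly backwards LLL_PRIVATE/LLL_SHARED values
   before they reach the kernel:

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
        == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ 0
        == FUTEX_WAIT | FUTEX_PRIVATE_FLAG      // process-private wait

     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
        == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG
        == FUTEX_WAIT                           // wait on a shared futex
*/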

#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait(futexp, val, NULL, private)

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    __ret = INTERNAL_SYSCALL (futex_time64, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (TO_TS64_P(timespec))); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    __ret = INTERNAL_SYSCALL (futex_time64, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#else

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (timespec)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#endif

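/* Usage sketch (illustrative only): a minimal one-shot notification built
   on the two macros above.  `flag' is a hypothetical int shared between
   the threads of one process, so LLL_PRIVATE is appropriate.

     static int flag;                               // 0 = not signalled

     void wait_for_flag (void)
     {
       while (flag == 0)
         lll_futex_wait (&flag, 0, LLL_PRIVATE);    // sleep while flag == 0
     }

     void raise_flag (void)
     {
       flag = 1;
       lll_futex_wake (&flag, INT_MAX, LLL_PRIVATE); // wake every waiter
     }
*/
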
#define lll_robust_dead(futexv, private) \
  do \
    { \
      int *__futexp = &(futexv); \
      atomic_or (__futexp, FUTEX_OWNER_DIED); \
      lll_futex_wake (__futexp, 1, private); \
    } \
  while (0)

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    __ret = INTERNAL_SYSCALL (futex_time64, __err, 6, (long) (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    \
    __ret = INTERNAL_SYSCALL (futex_time64, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })


#else

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret attribute_unused; \
    \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

#endif

static inline int __attribute__((always_inline))
__lll_trylock(int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
#define lll_trylock(lock) __lll_trylock (&(lock))

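/* Usage sketch (illustrative only): lll_trylock returns zero when the lock
   was acquired and non-zero when it is already held.  `lock' is a
   hypothetical lock word; lll_unlock and LLL_LOCK_INITIALIZER are defined
   further down in this file.

     static int lock = LLL_LOCK_INITIALIZER;

     if (lll_trylock (lock) == 0)
       {
         // ... critical section ...
         lll_unlock (lock, LLL_PRIVATE);
       }
     else
       {
         // contended: fall back to lll_lock (lock, LLL_PRIVATE) or give up
       }
*/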

static inline int __attribute__((always_inline))
__lll_cond_trylock(int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))


static inline int __attribute__((always_inline))
__lll_robust_trylock(int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_trylock(lock, id) \
  __lll_robust_trylock (&(lock), id)

extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

#define __lll_lock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
                                                                1, 0), 0)) \
      { \
        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
          __lll_lock_wait_private (__futex); \
        else \
          __lll_lock_wait (__futex, private); \
      } \
  }))
#define lll_lock(futex, private) __lll_lock (&(futex), private)


#define __lll_robust_lock(futex, id, private) \
  ({ \
    int *__futex = (futex); \
    int __val = 0; \
    \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })
#define lll_robust_lock(futex, id, private) \
  __lll_robust_lock (&(futex), id, private)


static inline void __attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
    __lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)


#define lll_robust_cond_lock(futex, id, private) \
  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)


extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                 int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
                                        int private) attribute_hidden;

static inline int __attribute__ ((always_inline))
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    result = __lll_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_timedlock(futex, abstime, private) \
  __lll_timedlock (&(futex), abstime, private)

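/* Usage sketch (illustrative only): lll_timedlock takes an absolute
   timeout.  Assuming the out-of-line __lll_timedlock_wait measures the
   deadline against CLOCK_REALTIME, a one-second deadline for a
   hypothetical lock word `lock' could be built like this:

     struct timespec abstime;
     clock_gettime (CLOCK_REALTIME, &abstime);
     abstime.tv_sec += 1;

     if (lll_timedlock (lock, &abstime, LLL_PRIVATE) == 0)
       {
         // ... critical section ...
         lll_unlock (lock, LLL_PRIVATE);
       }
     // a non-zero result (e.g. ETIMEDOUT) means the lock was not taken
*/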

static inline int __attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
                        int id, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
  __lll_robust_timedlock (&(futex), abstime, id, private)


#define __lll_unlock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    int __val = atomic_exchange_rel (__futex, 0); \
    \
    if (__builtin_expect (__val > 1, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)


#define __lll_robust_unlock(futex, private) \
  ((void) ({ \
    int *__futex = (futex); \
    int __val = atomic_exchange_rel (__futex, 0); \
    \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))
#define lll_robust_unlock(futex, private) \
  __lll_robust_unlock(&(futex), private)


#define lll_islocked(futex) \
  (futex != 0)


/* Our internal lock implementation is identical to the binary-compatible
   mutex implementation.  */

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)

/* The states of a lock are:
     0  -  untaken
     1  -  taken by one user
    >1  -  taken by more users  */

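/* Worked example of the state protocol above, for two threads A and B
   contending on a hypothetical lock word `lock' (illustrative only;
   __lll_lock_wait is defined outside this header, typically in
   lowlevellock.c):

     lock == 0    A: lll_lock     CAS 0 -> 1 succeeds, A owns the lock
     lock == 1    B: lll_lock     CAS fails; the wait path sets lock to 2
                                  and sleeps in FUTEX_WAIT
     lock == 2    A: lll_unlock   exchange to 0 returns 2; since 2 > 1,
                                  one waiter is woken
     lock == 0    B: wakes up     retakes the lock through the wait path,
                                  leaving lock == 2 while B holds it  */
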
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
   wake-up when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)

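/* Usage sketch (illustrative only): joining a terminated thread boils down
   to waiting for its TID field to be cleared by the kernel.  `pd' is a
   hypothetical thread descriptor whose `tid' member was registered with
   CLONE_CHILD_CLEARTID when the thread was created.

     lll_wait_tid (pd->tid);   // returns once the kernel has zeroed pd->tid
*/
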
extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __res = 0; \
    if ((tid) != 0) \
      __res = __lll_timedwait_tid (&(tid), (abstime)); \
    __res; \
  })

#endif /* lowlevellock.h */