/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>

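/* Default mappings onto the low-level lock primitives.  A file that
   includes this one (for instance the internal variant used by the
   condition-variable code, which is typically built with NO_INCR
   defined) can pre-define these macros to substitute different lock
   operations.  */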
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif


static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

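/* The fast path below handles the four basic mutex kinds (normal,
   recursive, adaptive, error-checking) inline; everything else (robust,
   priority inheritance, priority protection) is deferred to the
   out-of-line __pthread_mutex_lock_full.  */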
int
#ifdef NO_INCR
attribute_hidden internal_function
#else
attribute_protected
#endif
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
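      /* Adaptive mutex: on SMP machines spin for a bounded number of
         iterations in the hope that the current holder releases the lock
         soon; on a uniprocessor spinning cannot help, so block right away.  */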
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

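          /* Fold the observed spin count back into the per-mutex estimate
             with an exponential moving average (weight 1/8), so future
             attempts adapt to how contended this mutex tends to be.  */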
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
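      /* Advertise the mutex we are about to acquire in list_op_pending.
         Should this thread die in the middle of the operation, the
         kernel's robust-futex cleanup can still find the mutex and mark
         it with FUTEX_OWNER_DIED for the next locker.  */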
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

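    /* Priority-inheritance mutexes.  The uncontended path stores our TID
       into the futex word with a single compare-and-exchange; on
       contention FUTEX_LOCK_PI asks the kernel to queue us and boost the
       owner's priority until it releases the lock.  */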
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            int e = INTERNAL_SYSCALL (futex_time64, __err, 4,
                                      &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);
#else
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);
#endif

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            INTERNAL_SYSCALL (futex_time64, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#else
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#endif

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

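        /* Priority-protection (ceiling) mutexes keep the configured
           priority ceiling in the upper bits of the lock word and the
           lock state in the low bits: ceilval means unlocked, ceilval | 1
           locked, and ceilval | 2 locked with waiters.  Our own priority
           is raised to the ceiling before taking the lock.  */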
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif


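/* When NO_INCR is defined this file also provides
   __pthread_mutex_cond_lock_adjust, used by the condition-variable code
   after the kernel has already made this thread the owner of a
   priority-inheritance mutex (e.g. via FUTEX_WAIT_REQUEUE_PI); only the
   user-level bookkeeping still has to be brought up to date.  */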
#ifdef NO_INCR
void
attribute_hidden internal_function
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif