/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#if defined(__UCLIBC_USE_TIME64__)
#include "internal/time64_helpers.h"
#endif
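
/* Illustrative caller-side sketch (hypothetical names, not part of this
   file's build): the caller supplies an absolute CLOCK_REALTIME deadline
   and gets back 0 or an error number; errno is not set.

     struct timespec deadline;
     clock_gettime (CLOCK_REALTIME, &deadline);
     deadline.tv_sec += 2;
     int err = pthread_mutex_timedlock (&some_mutex, &deadline);
     if (err == ETIMEDOUT)
       handle_timeout ();
   */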

/* We need to build this function with optimization to avoid
 * lll_timedlock erroring out with
 * error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’
 */
int
#ifndef  __OPTIMIZE__
attribute_optimize("Os")
#endif
pthread_mutex_timedlock (
     pthread_mutex_t *mutex,
     const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value; the checks happen
     only on the paths that actually block.  */

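  /* Dispatch on the mutex kind; the plain timed mutex is hinted as the
     expected common case.  */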
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
			    PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
	goto simple;

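      /* Adaptive mutex on SMP: spin in user space for a bounded number of
         attempts before blocking in the kernel, on the assumption that the
         current owner runs on another CPU and will release the lock soon.  */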
      if (lll_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  result = lll_timedlock (mutex->__data.__lock, abstime,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_trylock (mutex->__data.__lock) != 0);

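	  /* Update the spin estimate: move __spins one eighth of the way
	     toward the number of iterations just used, i.e. keep an
	     exponentially weighted moving average of recent spin counts.  */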
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
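      /* Robust mutexes: record the mutex we are about to acquire in
         list_op_pending so the kernel can clean it up should this thread
         die in the middle of the operation.  */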
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

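      /* The lock word holds the owner's TID plus the FUTEX_WAITERS and
         FUTEX_OWNER_DIED flag bits.  Loop until we either take over a dead
         owner's mutex ourselves or let the kernel block us.  */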
      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

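	  /* Block in the kernel.  As used here, lll_robust_timedlock is
	     expected to return 0 on success, ETIMEDOUT or EINVAL on failure,
	     or the observed lock value when the previous owner died, which
	     sends us around the loop once more.  */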
	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

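	/* Fast path: try to take an uncontended mutex purely in user space
	   by installing our TID; only if that fails is the kernel asked to
	   do the priority-inheritance handling.  */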
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  FUTEX_LOCK_PI expects an absolute CLOCK_REALTIME
	       timeout, so abstime is handed to the kernel without being
	       converted to a relative value.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
	    int e = INTERNAL_SYSCALL (futex_time64, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      TO_TS64_P(abstime));
#else
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      abstime);
#endif
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
#if defined(__UCLIBC_USE_TIME64__)
		    struct __ts64_struct __now64;
#endif
		    struct timespec now = {.tv_sec = 0, .tv_nsec = 0};

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_clock_gettime64)
		    int __r = INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,
				      &__now64);

		    if (__r == 0) {
			now.tv_sec = __now64.tv_sec;
			now.tv_nsec = __now64.tv_nsec;
		    }
#else
		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
#endif
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
	    INTERNAL_SYSCALL (futex_time64, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);
#else
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);
#endif

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

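	/* Priority-protection locking: the high bits of the lock word hold
	   the priority ceiling and the low bits the lock state (ceilval
	   means free, ceilval | 1 locked, ceilval | 2 locked with waiters).
	   The thread's priority is raised to the ceiling before the lock
	   is taken.  */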
	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		result = EINVAL;
	      failpp:
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return result;
	      }

	    result = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (result)
	      return result;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 1, ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  {
		    /* Reject invalid timeouts.  */
		    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		      {
			result = EINVAL;
			goto failpp;
		      }

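		    /* The futex wait below takes a relative timeout, so the
		       absolute deadline has to be converted using the
		       current time.  */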
		    struct timeval tv;
		    struct timespec rt;

		    /* Get the current time.  */
		    (void) gettimeofday (&tv, NULL);

		    /* Compute relative timeout.  */
		    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
		    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
		    if (rt.tv_nsec < 0)
		      {
			rt.tv_nsec += 1000000000;
			--rt.tv_sec;
		      }

		    /* Already timed out?  */
		    if (rt.tv_sec < 0)
		      {
			result = ETIMEDOUT;
			goto failpp;
		      }

		    lll_futex_timed_wait (&mutex->__data.__lock,
					  ceilval | 2, &rt,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  }
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}