1 /* libc-internal interface for mutex locks.  NPTL version.
2    Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
3    This file is part of the GNU C Library.
4 
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public License as
7    published by the Free Software Foundation; either version 2.1 of the
8    License, or (at your option) any later version.
9 
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14 
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; see the file COPYING.LIB.  If
17    not, see <http://www.gnu.org/licenses/>.  */
18 
19 #ifndef _BITS_LIBC_LOCK_H
20 #define _BITS_LIBC_LOCK_H 1
21 
22 #include <bits/initspin.h>
23 #include <pthread.h>
24 #define __need_NULL
25 #include <stddef.h>
26 
27 
/* Fortunately Linux now has a means of doing locking which is
   realtime safe without the aid of the thread library.  We also need
   no fancy options like error checking mutexes etc.  We only need
   simple locks, maybe recursive.  This can be easily and cheaply
   implemented using futexes.  We will use them everywhere except in
   ld.so since ld.so might be used on old kernels with a different
   libc.so.  */
34 #ifdef _LIBC
35 # include <lowlevellock.h>
36 # include <tls.h>
37 #endif
38 
/* Mutex type.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
/* Outside libc/libpthread proper (or outside an _LIBC build) fall
   back to the public pthread types.  */
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
/* Inside libc/libpthread a plain lock is a bare futex word; the
   recursive variant adds a nesting count and the owning thread
   (compare __libc_lock_lock_recursive below).  */
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
/* The dynamic linker's recursive lock is always a pthread mutex.  */
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
/* Opaque types for other users; only pointers to these may appear in
   public structure definitions (see the comment further below).  */
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;
62 
/* Place libc-internal resource-freeing functions into their own
   section so they can be located and run together (by __libc_freeres
   — NOTE(review): inferred from the section name; confirm against
   the freeres machinery).  */
# define __libc_freeres_fn_section \
      __attribute__ ((section ("__libc_freeres_fn")))
65 
66 
/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
/* Likewise for a read-write lock.  */
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
/* Likewise for a recursive lock.  */
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
/* Likewise for the dynamic linker's recursive lock.  */
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
82 
/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
/* All-zero initializer: omit it so the variable lands in BSS.  */
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

/* Read-write locks always get an explicit initializer.  */
#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
112 
/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
/* All-zero initializer: rely on BSS zero-fill, as above.  */
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
/* { lock word, nesting count, owning thread }.  */
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

/* The dynamic linker's recursive lock wraps a recursive pthread
   mutex.  */
#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

/* Reset NAME to its freshly-initialized (unlocked) state.  */
#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
139 
/* If we check for a weakly referenced symbol and then perform a
   normal jump to it the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif

/* True if the (weak) pthread function NAME is actually available.  */
# define PTFAVAIL(NAME) (NAME != NULL)
/* Call FUNC with ARGS if available, otherwise yield ELSE.  */
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
/* Call FUNC unconditionally; caller has already checked PTFAVAIL.  */
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
159 
160 
/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Futex-word lock: plain assignment suffices.  */
# define __libc_lock_init(NAME) ((void)((NAME) = LLL_LOCK_INITIALIZER))
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
   inefficient.  Zero-fill produces the same unlocked state.  */
# define __libc_rwlock_init(NAME) \
  (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
#else
# define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif
178 
/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
/* If libpthread is not linked in (__pthread_mutex_init is a weak,
   unresolved reference) the lock is left untouched.  */
# define __libc_lock_init_recursive(NAME) \
  do {									      \
    if (__pthread_mutex_init != NULL)					      \
      {									      \
	pthread_mutexattr_t __attr;					      \
	__pthread_mutexattr_init (&__attr);				      \
	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
	__pthread_mutex_init (&(NAME).mutex, &__attr);			      \
	__pthread_mutexattr_destroy (&__attr);				      \
      }									      \
  } while (0)
#endif

/* Initialize the dynamic linker's recursive lock; same pattern, but
   always operates on the embedded pthread mutex.  */
#define __rtld_lock_init_recursive(NAME) \
  do {									      \
    if (__pthread_mutex_init != NULL)					      \
      {									      \
	pthread_mutexattr_t __attr;					      \
	__pthread_mutexattr_init (&__attr);				      \
	__pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
	__pthread_mutex_init (&(NAME).mutex, &__attr);			      \
	__pthread_mutexattr_destroy (&__attr);				      \
      }									      \
  } while (0)
208 
/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Futex-word locks hold no external resources; nothing to do.  */
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#if defined SHARED && !defined NOT_IN_libc
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
232 
/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  Take the low-level lock
   only on the first acquisition by this thread; re-entry just bumps
   the nesting count.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock_recursive(NAME) \
  do {									      \
    void *self = THREAD_SELF;						      \
    if ((NAME).owner != self)						      \
      {									      \
	lll_lock ((NAME).lock, LLL_PRIVATE);				      \
	(NAME).owner = self;						      \
      }									      \
    ++(NAME).cnt;							      \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif
262 
/* Try to lock the named lock variable.  Yields zero on success,
   nonzero when the lock is already held.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  Yields 0 on success
   (including re-entry by the owning thread) and EBUSY when another
   thread holds the lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock_recursive(NAME) \
  ({									      \
    int result = 0;							      \
    void *self = THREAD_SELF;						      \
    if ((NAME).owner != self)						      \
      {									      \
	if (lll_trylock ((NAME).lock) == 0)				      \
	  {								      \
	    (NAME).owner = self;					      \
	    (NAME).cnt = 1;						      \
	  }								      \
	else								      \
	  result = EBUSY;						      \
      }									      \
    else								      \
      ++(NAME).cnt;							      \
    result;								      \
  })
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
303 
/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)

/* Unlock the recursive named lock variable.  Release the low-level
   lock only when the outermost acquisition is undone.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  */
# define __libc_lock_unlock_recursive(NAME) \
  do {									      \
    if (--(NAME).cnt == 0)						      \
      {									      \
	(NAME).owner = NULL;						      \
	lll_unlock ((NAME).lock, LLL_PRIVATE);				      \
      }									      \
  } while (0)
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
330 
#if defined _LIBC && defined SHARED
/* Default handlers: just adjust the recursion count of the underlying
   mutex without any atomic locking.  NOTE(review): presumably safe
   only while ld.so runs single-threaded, before libpthread installs
   real handlers via the GL(...) hooks used below — confirm against
   the loader's setup code.  (Trailing `;' in the expansion is kept as
   callers appear to expect it.)  */
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

/* Dispatch through function pointers in the loader's global state.  */
# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif
350 
/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  Without __pthread_once, fall back
   to a plain flag test; the `|= 2' marks the control word done
   (NOTE(review): presumably matching pthread_once's internal state
   encoding — confirm against libpthread).  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {									      \
    if (PTFAVAIL (__pthread_once))					      \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),		      \
					       INIT_FUNCTION));		      \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {			      \
      INIT_FUNCTION ();							      \
      (ONCE_CONTROL) |= 2;						      \
    }									      \
  } while (0)
373 
374 
/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
/* The *_defer/*_restore variants presumably also defer and restore
   the cancellation state — verify against libpthread.  */
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);
387 
/* Start critical region with cleanup.  NOTE: this macro opens a brace
   that only __libc_cleanup_region_end closes; the two must be paired
   in the same function.  When the cleanup-push function is not
   available, the routine and argument are remembered in the buffer so
   the *_end macros can invoke them directly.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer;				      \
    int _avail;								      \
    if (DOIT) {								      \
      _avail = PTFAVAIL (_pthread_cleanup_push_defer);			      \
      if (_avail) {							      \
	__libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT,  \
							      ARG));	      \
      } else {								      \
	_buffer.__routine = (FCT);					      \
	_buffer.__arg = (ARG);						      \
      }									      \
    } else {								      \
      _avail = 0;							      \
    }

/* End critical region with cleanup.  Closes the brace opened by
   __libc_cleanup_region_start.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) {							      \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)							      \
      _buffer.__routine (_buffer.__arg);				      \
  }

/* Sometimes we have to exit the block in the middle.  Like
   __libc_cleanup_region_end but does NOT close the region's brace.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {							      \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)							      \
      _buffer.__routine (_buffer.__arg)
419 
420 
/* Normal cleanup handling, based on the C cleanup attribute.  This
   routine is installed via __attribute__ ((__cleanup__)) on the frame
   declared by __libc_cleanup_push below; it runs the recorded
   cancellation routine unless the frame was disarmed by
   __libc_cleanup_pop (execute == 0).  */
static inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}
428 
/* Push a cleanup handler using the compiler's cleanup attribute.
   NOTE: opens a `do {' that __libc_cleanup_pop closes; the two must
   appear as a pair in the same block.  */
#define __libc_cleanup_push(fct, arg) \
  do {									      \
    struct __pthread_cleanup_frame __clframe				      \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine)))		      \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg),		      \
          .__do_it = 1 };

/* Pop the handler; it still runs at scope exit iff EXECUTE is
   nonzero.  */
#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute);					      \
  } while (0)
439 
440 
/* Create thread-specific key.  Yields 1 (failure) when no thread
   library is available.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)

/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
			      void (*__parent) (void),
			      void (*__child) (void),
			      void *__dso_handle);
462 
/* Functions that are used by this file and are internal to the GNU C
   library.  These must stay in sync with the corresponding public
   pthread prototypes; they are declared weak below so references can
   be elided in single-threaded processes.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
				 const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
					int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
				  const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
				 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
				  const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
			   void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
			     void (*__parent) (void),
			     void (*__child) (void));
515 
516 
517 
/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  The two branches below (weak_extern vs.
   #pragma weak) must declare exactly the same set of symbols: any
   symbol missing from one branch becomes a strong reference there and
   would force libpthread into single-threaded programs.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
weak_extern (__pthread_mutex_init)
weak_extern (__pthread_mutex_destroy)
weak_extern (__pthread_mutex_lock)
weak_extern (__pthread_mutex_trylock)
weak_extern (__pthread_mutex_unlock)
weak_extern (__pthread_mutexattr_init)
weak_extern (__pthread_mutexattr_destroy)
weak_extern (__pthread_mutexattr_settype)
weak_extern (__pthread_rwlock_init)
weak_extern (__pthread_rwlock_destroy)
weak_extern (__pthread_rwlock_rdlock)
weak_extern (__pthread_rwlock_tryrdlock)
weak_extern (__pthread_rwlock_wrlock)
weak_extern (__pthread_rwlock_trywrlock)
weak_extern (__pthread_rwlock_unlock)
weak_extern (__pthread_key_create)
weak_extern (__pthread_setspecific)
weak_extern (__pthread_getspecific)
weak_extern (__pthread_once)
weak_extern (__pthread_atfork)
weak_extern (_pthread_cleanup_push_defer)
weak_extern (_pthread_cleanup_pop_restore)
weak_extern (pthread_setcancelstate)
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
/* Was missing here although present in the weak_extern branch above;
   without it __libc_rwlock_init's reference would be strong.  */
#  pragma weak __pthread_rwlock_init
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak pthread_setcancelstate
# endif
#endif
572 
573 #endif	/* bits/libc-lock.h */
574