/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <fork.h>
#include <version.h>
#include <smp.h>
#include <lowlevellock.h>
#include <bits/kernel-features.h>
#include <stdio.h>

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available and
   therefore cannot be used.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
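
/* A hedged sketch of how this flag is meant to be consulted elsewhere:
   code that depends on the kernel robust list (such as the robust-mutex
   path in pthread_mutex_init.c) is expected to test it and refuse, e.g.

     if (__set_robust_list_avail < 0)
       return ENOTSUP;

   (illustrative only; the exact check lives in the mutex code).  */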

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if we do have FUTEX_CLOCK_REALTIME.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif
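
/* Timed-wait paths (e.g. pthread_mutex_timedlock) can consult this flag
   to choose between FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME and emulating
   an absolute timeout with a relative FUTEX_WAIT.  */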

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifndef SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif

#ifdef SHARED
static void nptl_freeres (void);


static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once_internal,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock_internal,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock_internal,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock_internal,
    .ptr___pthread_key_create = __pthread_key_create_internal,
    .ptr___pthread_getspecific = __pthread_getspecific_internal,
    .ptr___pthread_setspecific = __pthread_setspecific_internal,
    .ptr__pthread_cleanup_push_defer = _pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = _pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
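
/* libc receives this table through __libc_pthread_init (called below)
   and forwards pthread_* calls through it, so libc itself need not link
   against libpthread and single-threaded programs avoid the cost.  */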


#ifdef SHARED
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}
#endif


/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals, or to send the signal from another process.  That
     is not correct and might even be a security problem.  Try to catch
     as many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid; without __ASSUME_CORRECT_SI_PID this test is
	 skipped.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this
	 flag is already set, but if the signal is sent directly
	 (internally or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      oldval = curval;
    }
}
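
/* Illustrative sketch (assumed, not defined in this file): the sender,
   pthread_cancel, delivers SIGCANCEL to the target thread with the
   tgkill system call, roughly

     INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
		       pd->tid, SIGCANCEL);

   which is why the handler above can insist on si_code == SI_TKILL.  */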


struct xid_command *__xidcmd attribute_hidden;

/* For changing the threads' credentials we also use a signal.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals, or to send the signal from another process.  That
     is not correct and might even be a security problem.  Try to catch
     as many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid; without __ASSUME_CORRECT_SI_PID this test is
	 skipped.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
			__xidcmd->id[1], __xidcmd->id[2]);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
					  flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
}
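
/* Protocol sketch (the initiator lives elsewhere): __nptl_setxid stores
   the set*id syscall number and its arguments in *__xidcmd, signals
   every thread with SIGSETXID, and waits on __xidcmd->cntr.  Each
   handler runs the syscall and decrements the counter; the last one
   wakes the initiator, making the credential change process-wide.  */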


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void __pthread_initialize_minimal_internal (void) attribute_hidden;
void
__pthread_initialize_minimal_internal (void)
{
  static int initialized = 0;

  if (initialized)
    return;
  initialized = 1;

#ifndef SHARED
  /* Unlike in the dynamically linked case, the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and moving any of the
     following code ahead of the __libc_setup_tls call.  That function
     initializes the thread register which is subsequently used.  */
  __asm__ __volatile__ ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				  - offsetof (pthread_mutex_t,
					      __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			      sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();
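
  /* Worked example of futex_offset: __data.__lock sits at the start of
     pthread_mutex_t while __data.__list comes later, so the offset is a
     small negative number.  On thread exit the kernel walks
     robust_head.list and adds futex_offset to each node to find the
     futex word whose owner died.  */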

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally), so doing
     the test once, this early, is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			     FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }
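
  /* The probe above relies on a kernel detail: FUTEX_WAKE on a word
     nobody waits on is harmless and returns 0 (waiters woken) when
     FUTEX_PRIVATE_FLAG is understood, while older kernels reject the
     unknown operation, so one cheap call settles the question.  */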

  /* Private futexes were introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is
	 the bit mask.  But since we will not actually wait at all the
	 value is irrelevant.  Given that passing six parameters is
	 difficult on some architectures we just pass whatever random
	 value the calling convention calls for to the kernel.  It
	 causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
#endif

  /* Set the initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);

  /* Get the size and alignment requirements of the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);
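
  /* Example: a static TLS block reported as 1000 bytes with a 64-byte
     alignment requirement is rounded up to 1024 bytes here.  */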

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;
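
  /* Worked example of the rounding above: with 4 KiB pages a limit of
     40000 bytes becomes (40000 + 4095) & ~4095 == 40960, i.e. ten
     pages; the common 8 MiB default is already page aligned.  */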

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();

  /* uClibc-specific stdio initialization for threads.  Now that
     threading is live, switch every open stream that is not set to
     caller-managed locking over to automatic locking.  */
  {
    FILE *fp;
    _stdio_user_locking = 0;	/* Was 2 while threading was not initialized.  */
    for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen)
      {
	if (fp->__user_locking != 1)
	  fp->__user_locking = 0;
      }
  }
}
strong_alias (__pthread_initialize_minimal_internal,
	      __pthread_initialize_minimal)