/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <ldsodefs.h>
#include <atomic.h>
#include <resolv.h>
#include <bits/kernel-features.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>

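/* Look PD up on the stack lists.  Return PD if its descriptor is still
   on the list of used stacks or on the list of user-provided stacks,
   and NULL otherwise.  */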
struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip an entire unallocated second-level block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
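
/* Illustration: a minimal sketch of the user-level key API whose
   destructors the loop above runs at thread exit.  The key and
   destructor names are hypothetical; the pthread_key_create and
   pthread_setspecific calls are the standard POSIX interfaces
   implemented elsewhere in NPTL.

     #include <pthread.h>
     #include <stdlib.h>

     static pthread_key_t buf_key;

     static void buf_destructor (void *data)
     {
       free (data);   // run by __nptl_deallocate_tsd at thread exit
     }

     static void setup (void)
     {
       pthread_key_create (&buf_key, buf_destructor);
     }

     static void use (void)
     {
       pthread_setspecific (buf_key, malloc (64));
     }

   If a destructor stores another non-NULL value, specific_used is set
   again and the do/while loop above repeats, but at most
   PTHREAD_DESTRUCTOR_ITERATIONS times.  */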


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
  /* Initialize resolver state pointer.  */
  __resp = &pd->res;
#endif
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      /* Run the code the user provided.  */
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
    }

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  /* disable for now
  __libc_thread_freeres ();
  */

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since kernel support for those is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
  char *sp = CURRENT_STACK_FRAME;
#ifdef _STACK_GROWS_DOWN
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
  size_t freesize = ((char *) pd->stackblock - sp) & ~pagesize_m1;
#endif
  assert (freesize < pd->stackblock_size);

  /* madvise is not supported on MMU-less systems.  */
#ifdef __ARCH_USE_MMU__
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
#endif

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}


/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };


int
pthread_create (pthread_t *newthread, const pthread_attr_t *attr,
                void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
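
/* Illustration: a minimal sketch of how this entry point is used by a
   program.  The 'worker' function and variable names are hypothetical;
   pthread_create and pthread_join are the standard POSIX calls (the
   latter lives in pthread_join.c).

     #include <pthread.h>
     #include <stdio.h>

     static void *worker (void *arg)
     {
       printf ("hello from %s\n", (const char *) arg);
       return NULL;
     }

     int main (void)
     {
       pthread_t tid;
       if (pthread_create (&tid, NULL, worker, "worker") != 0)
         return 1;   // pthread_create returns an errno value, e.g. EAGAIN
       return pthread_join (tid, NULL);
     }

   A thread created with a pthread_attr_t whose detach state is
   PTHREAD_CREATE_DETACHED is never joined; in that case start_thread
   frees the TCB itself via __free_tcb.  */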

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)

/* The UCLIBC_MUTEX_xxx macros expect these to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_init)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_push_defer)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_pop_restore)