/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <resolv.h>
#include <bits/kernel-features.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;
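/* Both variables above are looked up by the thread debugging library
   (libthread_db), which uses them to deliver thread-event notifications
   to an attached debugger.  */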

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;
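/* The count starts at 1 to account for the initial thread.  When it
   drops to zero in start_thread below, the last exiting thread
   terminates the whole process with exit (0).  */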


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}


static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif
#if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
  /* Initialize resolver state pointer.  */
  __resp = &pd->res;
#endif
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
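  /* NOT_FIRST_CALL is zero when setjmp returns directly and nonzero when
     the cancellation/exit unwinding longjmps back here, in which case the
     user-supplied start routine is skipped and only the cleanup below
     runs.  */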
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  /* disable for now
  __libc_thread_freeres ();
  */
  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

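              /* Atomically push this descriptor onto the global event
                 list, retrying if another thread updates
                 __nptl_last_event concurrently.  */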
              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
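      /* Walk the robust mutex list ourselves: mark every mutex the dying
         thread still owns as owner-died and wake a waiter so it can
         recover the mutex (EOWNERDEAD).  */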
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
  char *sp = CURRENT_STACK_FRAME;
#ifdef _STACK_GROWS_DOWN
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
  size_t freesize = ((char *) pd->stackblock - sp) & ~pagesize_m1;
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}


/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };


int
__pthread_create_2_1 (
     pthread_t *newthread,
     const pthread_attr_t *attr,
     void *(*start_routine) (void *),
     void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
weak_alias(__pthread_create_2_1, pthread_create)

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)

/* The UCLIBC_MUTEX_xxx macros expect to have these as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_init)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_push_defer)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_pop_restore)