1 /* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
2    This file is part of the GNU C Library.
3    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4 
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 2.1 of the License, or (at your option) any later version.
9 
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14 
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; if not, see
17    <http://www.gnu.org/licenses/>.  */
18 
19 #include <assert.h>
20 #include <errno.h>
21 #include <signal.h>
22 #include <stdint.h>
23 #include <string.h>
24 #include <unistd.h>
25 #include <sys/mman.h>
26 #include <sys/param.h>
27 #include <tls.h>
28 #include <lowlevellock.h>
29 #include <link.h>
30 #include <bits/kernel-features.h>
31 
32 
33 #ifndef NEED_SEPARATE_REGISTER_STACK
34 
35 /* Most architectures have exactly one stack pointer.  Some have more.  */
36 # define STACK_VARIABLES void *stackaddr = NULL
37 
38 /* How to pass the values to the 'create_thread' function.  */
39 # define STACK_VARIABLES_ARGS stackaddr
40 
41 /* How to declare the function that receives these parameters.  */
42 # define STACK_VARIABLES_PARMS void *stackaddr
43 
44 /* How to declare allocate_stack.  */
45 # define ALLOCATE_STACK_PARMS void **stack
46 
47 /* This is how the function is called.  We do it this way to allow
48    other variants of the function to have more parameters.  */
49 # define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)
50 
51 #else
52 
53 /* We need two stacks.  The kernel will place them but we have to tell
54    the kernel about the size of the reserved address space.  */
55 # define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0
56 
57 /* How to pass the values to the 'create_thread' function.  */
58 # define STACK_VARIABLES_ARGS stackaddr, stacksize
59 
60 /* How to declare the function that receives these parameters.  */
61 # define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize
62 
63 /* How to declare allocate_stack.  */
64 # define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize
65 
66 /* This is how the function is called.  We do it this way to allow
67    other variants of the function to have more parameters.  */
68 # define ALLOCATE_STACK(attr, pd) \
69   allocate_stack (attr, pd, &stackaddr, &stacksize)
70 
71 #endif
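
/* To make the plumbing above concrete: the caller (pthread_create.c)
   is expected to use these macros roughly as sketched below.  This is
   only an illustration, not a verbatim copy of the call sites, which
   may differ in detail:

     STACK_VARIABLES;                        // void *stackaddr = NULL; ...
     int err = ALLOCATE_STACK (attr, &pd);   // fills in stackaddr (and stacksize)
     if (err == 0)
       err = create_thread (pd, attr, STACK_VARIABLES_ARGS);

   Targets with a separate register stack (historically ia64) simply
   carry one extra value through the same three macros.  */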
72 
73 
74 /* Default alignment of stack.  */
75 #ifndef STACK_ALIGN
76 # define STACK_ALIGN __alignof__ (long double)
77 #endif
78 
79 /* Default value for minimal stack size after allocating thread
80    descriptor and guard.  */
81 #ifndef MINIMAL_REST_STACK
82 # define MINIMAL_REST_STACK	4096
83 #endif
84 
85 
86 /* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
87    a stack.  Use it when possible.  */
88 #ifndef MAP_STACK
89 # define MAP_STACK 0
90 #endif
91 
92 /* This yields the pointer that TLS support code calls the thread pointer.  */
93 #if defined(TLS_TCB_AT_TP)
94 # define TLS_TPADJ(pd) (pd)
95 #elif defined(TLS_DTV_AT_TP)
96 # define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
97 #endif
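
/* Put differently: on TLS_TCB_AT_TP targets the thread pointer is the
   thread descriptor itself, while on TLS_DTV_AT_TP targets the thread
   pointer sits TLS_PRE_TCB_SIZE bytes above the descriptor.  TLS_TPADJ
   turns a 'struct pthread *' into the value the TLS machinery
   (_dl_allocate_tls_init, _dl_deallocate_tls, GET_DTV) expects, so the
   rest of this file can pass descriptors around without caring which
   layout is in use.  */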
98 
99 /* Cache handling for not-yet free stacks.  */
100 
101 /* Maximum size in bytes of the stack cache.  */
102 static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
103 static size_t stack_cache_actsize;
104 
105 /* Mutex protecting this variable.  */
106 static int stack_cache_lock = LLL_LOCK_INITIALIZER;
107 
108 /* List of cached stacks not currently in use.  */
109 static LIST_HEAD (stack_cache);
110 
111 /* List of the stacks in use.  */
112 static LIST_HEAD (stack_used);
113 
114 /* We need to record what list operations we are going to do so that,
115    in case of an asynchronous interruption due to a fork() call, we
116    can correct for the work.  */
117 static uintptr_t in_flight_stack;
118 
119 /* List of the threads with user provided stacks in use.  No need to
120    initialize this, since it's done in __pthread_initialize_minimal.  */
121 list_t __stack_user __attribute__ ((nocommon));
122 hidden_data_def (__stack_user)
123 
124 #if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
125 /* Number of threads created.  */
126 static unsigned int nptl_ncreated;
127 #endif
128 
129 
130 /* Check whether the stack is still used or not.  */
131 #define FREE_P(descr) ((descr)->tid <= 0)
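
/* The kernel clears the 'tid' field when the thread exits (the
   CLONE_CHILD_CLEARTID mechanism set up when the thread is created),
   so a non-positive value means nobody runs on this stack anymore and
   it may be recycled; see also the comment in __deallocate_stack.  */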
132 
133 
134 static void
135 stack_list_del (list_t *elem)
136 {
137   in_flight_stack = (uintptr_t) elem;
138 
139   atomic_write_barrier ();
140 
141   list_del (elem);
142 
143   atomic_write_barrier ();
144 
145   in_flight_stack = 0;
146 }
147 
148 
149 static void
150 stack_list_add (list_t *elem, list_t *list)
151 {
152   in_flight_stack = (uintptr_t) elem | 1;
153 
154   atomic_write_barrier ();
155 
156   list_add (elem, list);
157 
158   atomic_write_barrier ();
159 
160   in_flight_stack = 0;
161 }
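
/* The two wrappers above tag IN_FLIGHT_STACK with the list element
   being manipulated so that __reclaim_stacks can finish an interrupted
   operation after fork().  Bit 0 records which operation was in
   flight, roughly:

     in_flight_stack = (uintptr_t) elem | 1;   // an add was in progress
     in_flight_stack = (uintptr_t) elem;       // a delete was in progress

   list_t objects are more than 1-byte aligned, so bit 0 is free to be
   used as this flag.  */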
162 
163 
164 /* We create a doubly linked list of all cache entries.  Doubly linked
165    because this allows removing entries from the end.  */
166 
167 
168 /* Get a stack from the cache.  We have to match by size since
169    some blocks might be too small or far too large.  */
170 static struct pthread *
171 get_cached_stack (size_t *sizep, void **memp)
172 {
173   size_t size = *sizep;
174   struct pthread *result = NULL;
175   list_t *entry;
176 
177   lll_lock (stack_cache_lock, LLL_PRIVATE);
178 
179   /* Search the cache for a matching entry.  We search for the
180      smallest stack which has at least the required size.  Note that
181      in normal situations the size of all allocated stacks is the
182      same.  At the very least there are only a few different sizes.
183      Therefore this loop will exit early most of the time with an
184      exact match.  */
185   list_for_each (entry, &stack_cache)
186     {
187       struct pthread *curr;
188 
189       curr = list_entry (entry, struct pthread, list);
190       if (FREE_P (curr) && curr->stackblock_size >= size)
191 	{
192 	  if (curr->stackblock_size == size)
193 	    {
194 	      result = curr;
195 	      break;
196 	    }
197 
198 	  if (result == NULL
199 	      || result->stackblock_size > curr->stackblock_size)
200 	    result = curr;
201 	}
202     }
203 
204   if (__builtin_expect (result == NULL, 0)
205       /* Make sure the size difference is not too excessive.  In that
206 	 case we do not use the block.  */
207       || __builtin_expect (result->stackblock_size > 4 * size, 0))
208     {
209       /* Release the lock.  */
210       lll_unlock (stack_cache_lock, LLL_PRIVATE);
211 
212       return NULL;
213     }
214 
215   /* Dequeue the entry.  */
216   stack_list_del (&result->list);
217 
218   /* And add to the list of stacks in use.  */
219   stack_list_add (&result->list, &stack_used);
220 
221   /* And decrease the cache size.  */
222   stack_cache_actsize -= result->stackblock_size;
223 
224   /* Release the lock early.  */
225   lll_unlock (stack_cache_lock, LLL_PRIVATE);
226 
227   /* Report size and location of the stack to the caller.  */
228   *sizep = result->stackblock_size;
229   *memp = result->stackblock;
230 
231   /* Cancellation handling is back to the default.  */
232   result->cancelhandling = 0;
233   result->cleanup = NULL;
234 
235   /* No pending event.  */
236   result->nextevent = NULL;
237 
238   /* Clear the DTV.  */
239   dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
240   memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));
241 
242   /* Re-initialize the TLS.  */
243   _dl_allocate_tls_init (TLS_TPADJ (result));
244 
245   return result;
246 }
247 
248 
249 /* Free stacks until cache size is lower than LIMIT.  */
250 void
251 __free_stacks (size_t limit)
252 {
253   /* We reduce the size of the cache.  Remove the last entries until
254      the size is below the limit.  */
255   list_t *entry;
256   list_t *prev;
257 
258   /* Search from the end of the list.  */
259   list_for_each_prev_safe (entry, prev, &stack_cache)
260     {
261       struct pthread *curr;
262 
263       curr = list_entry (entry, struct pthread, list);
264       if (FREE_P (curr))
265 	{
266 	  /* Unlink the block.  */
267 	  stack_list_del (entry);
268 
269 	  /* Account for the freed memory.  */
270 	  stack_cache_actsize -= curr->stackblock_size;
271 
272 	  /* Free the memory associated with the ELF TLS.  */
273 	  _dl_deallocate_tls (TLS_TPADJ (curr), false);
274 
275 	  /* Remove this block.  This should never fail.  If it does
276 	     something is really wrong.  */
277 	  if (munmap (curr->stackblock, curr->stackblock_size) != 0)
278 	    abort ();
279 
280 	  /* Maybe we have freed enough.  */
281 	  if (stack_cache_actsize <= limit)
282 	    break;
283 	}
284     }
285 }
286 
287 
288 /* Add a stack which is not used anymore to the cache.  Must be
289    called with the cache lock held.  */
290 static inline void
291 __attribute ((always_inline))
292 queue_stack (struct pthread *stack)
293 {
294   /* We unconditionally add the stack to the list.  The memory may
295      still be in use but it will not be reused until the kernel marks
296      the stack as not used anymore.  */
297   stack_list_add (&stack->list, &stack_cache);
298 
299   stack_cache_actsize += stack->stackblock_size;
300   if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
301     __free_stacks (stack_cache_maxsize);
302 }
303 
304 
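/* Change the protection of one thread stack so it also allows
   execution.  Returns 0 on success or an errno value.  Only the usable
   part of the stack is touched; the guard area keeps its protection.  */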
305 static int
306 internal_function
307 change_stack_perm (struct pthread *pd
308 #ifdef NEED_SEPARATE_REGISTER_STACK
309 		   , size_t pagemask
310 #endif
311 		   )
312 {
313 #ifdef NEED_SEPARATE_REGISTER_STACK
314   void *stack = (pd->stackblock
315 		 + (((((pd->stackblock_size - pd->guardsize) / 2)
316 		      & pagemask) + pd->guardsize) & pagemask));
317   size_t len = pd->stackblock + pd->stackblock_size - stack;
318 #elif defined _STACK_GROWS_DOWN
319   void *stack = pd->stackblock + pd->guardsize;
320   size_t len = pd->stackblock_size - pd->guardsize;
321 #elif defined _STACK_GROWS_UP
322   void *stack = pd->stackblock;
323   size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
324 #else
325 # error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
326 #endif
327   if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
328     return errno;
329 
330   return 0;
331 }
332 
333 
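/* Set up the stack and the thread descriptor for a new thread: either
   reuse the user-supplied stack from ATTR, take a suitable stack from
   the cache, or map fresh anonymous memory.  On success 0 is returned,
   *PDP points to the initialized descriptor and the initial stack
   pointer for the new thread is passed back through
   ALLOCATE_STACK_PARMS; otherwise an errno value is returned.  */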
334 static int
335 allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
336 		ALLOCATE_STACK_PARMS)
337 {
338   struct pthread *pd;
339   size_t size;
340   size_t pagesize_m1 = __getpagesize () - 1;
341   void *stacktop;
342 
343   assert (attr != NULL);
344   assert (powerof2 (pagesize_m1 + 1));
345   assert (TCB_ALIGNMENT >= STACK_ALIGN);
346 
347   /* Get the stack size from the attribute if it is set.  Otherwise we
348      use the default we determined at start time.  */
349   size = attr->stacksize ?: __default_stacksize;
350 
351   /* Get memory for the stack.  */
352   if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
353     {
354       uintptr_t adj;
355 
356       /* If the user also specified the size of the stack make sure it
357 	 is large enough.  */
358       if (attr->stacksize != 0
359 	  && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
360 	return EINVAL;
361 
362       /* Adjust stack size for alignment of the TLS block.  */
363 #if defined(TLS_TCB_AT_TP)
364       adj = ((uintptr_t) attr->stackaddr - TLS_TCB_SIZE)
365 	    & __static_tls_align_m1;
366       assert (size > adj + TLS_TCB_SIZE);
367 #elif defined(TLS_DTV_AT_TP)
368       adj = ((uintptr_t) attr->stackaddr - __static_tls_size)
369 	    & __static_tls_align_m1;
370       assert (size > adj);
371 #endif
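
      /* Net effect of the computation above: the thread pointer that
	 will be derived from the descriptor (TLS_TPADJ (pd)) ends up
	 rounded down to the static TLS alignment within the
	 user-supplied memory; ADJ is merely the number of bytes
	 discarded at the top to achieve this.  */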
372 
373       /* The user provided some memory.  Let's hope it matches the
374 	 size...  We do not allocate guard pages if the user provided
375 	 the stack.  It is the user's responsibility to do this if it
376 	 is wanted.  */
377 #if defined(TLS_TCB_AT_TP)
378       pd = (struct pthread *) ((uintptr_t) attr->stackaddr
379 			       - TLS_TCB_SIZE - adj);
380 #elif defined(TLS_DTV_AT_TP)
381       pd = (struct pthread *) (((uintptr_t) attr->stackaddr
382 			        - __static_tls_size - adj)
383 			       - TLS_PRE_TCB_SIZE);
384 #endif
385 
386       /* The descriptor placed in the user-provided memory needs to be cleared.  */
387       memset (pd, '\0', sizeof (struct pthread));
388 
389       /* The first TSD block is included in the TCB.  */
390       pd->specific[0] = pd->specific_1stblock;
391 
392       /* Remember the stack-related values.  */
393       pd->stackblock = (char *) attr->stackaddr - size;
394       pd->stackblock_size = size;
395 
396       /* This is a user-provided stack.  It will not be queued in the
397 	 stack cache nor will the memory (except the TLS memory) be freed.  */
398       pd->user_stack = true;
399 
400       /* This is at least the second thread.  */
401       pd->header.multiple_threads = 1;
402 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
403       __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
404 #endif
405 
406 #ifndef __ASSUME_PRIVATE_FUTEX
407       /* The thread must know when private futexes are supported.  */
408       pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
409 						header.private_futex);
410 #endif
411 
412 #ifdef NEED_DL_SYSINFO
413       /* Copy the sysinfo value from the parent.  */
414       THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
415 #endif
416 
417       /* The process ID is also the same as that of the caller.  */
418       pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
419 
420       /* Allocate the DTV for this thread.  */
421       if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
422 	{
423 	  /* Something went wrong.  */
424 	  assert (errno == ENOMEM);
425 	  return EAGAIN;
426 	}
427 
428 
429       /* Prepare to modify global data.  */
430       lll_lock (stack_cache_lock, LLL_PRIVATE);
431 
432       /* And add to the list of stacks in use.  */
433       list_add (&pd->list, &__stack_user);
434 
435       lll_unlock (stack_cache_lock, LLL_PRIVATE);
436     }
437   else
438     {
439       /* Allocate some anonymous memory.  If possible use the cache.  */
440       size_t guardsize;
441       size_t reqsize;
442       void *mem = 0;
443       const int prot = (PROT_READ | PROT_WRITE);
444 
445 #if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
446       /* Add one more page for stack coloring.  Don't do it for stacks
447 	 with 16 times pagesize or larger.  This might just cause
448 	 unnecessary misalignment.  */
449       if (size <= 16 * pagesize_m1)
450 	size += pagesize_m1 + 1;
451 #endif
452 
453       /* Adjust the stack size for alignment.  */
454       size &= ~__static_tls_align_m1;
455       assert (size != 0);
456 
457       /* Make sure the size of the stack is large enough for the guard
458 	 and the thread descriptor.  */
459       guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
460       if (__builtin_expect (size < ((guardsize + __static_tls_size
461 				     + MINIMAL_REST_STACK + pagesize_m1)
462 				    & ~pagesize_m1),
463 			    0))
464 	/* The stack is too small (or the guard too large).  */
465 	return EINVAL;
466 
467       /* Try to get a stack from the cache.  */
468       reqsize = size;
469       pd = get_cached_stack (&size, &mem);
470       if (pd == NULL)
471 	{
472 	  /* To avoid aliasing effects on a larger scale than pages we
473 	     adjust the allocated stack size if necessary.  This way
474 	     allocations directly following each other will not have
475 	     aliasing problems.  */
476 #if defined MULTI_PAGE_ALIASING && MULTI_PAGE_ALIASING != 0
477 	  if ((size % MULTI_PAGE_ALIASING) == 0)
478 	    size += pagesize_m1 + 1;
479 #endif
480 
481 	  mem = mmap (NULL, size, prot,
482 		      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
483 
484 	  if (__builtin_expect (mem == MAP_FAILED, 0))
485 	    {
486 	      if (errno == ENOMEM)
487 		__set_errno (EAGAIN);
488 
489 	       return errno;
490 	    }
491 
492 	  /* SIZE is guaranteed to be greater than zero.
493 	     So we can never get a null pointer back from mmap.  */
494 	  assert (mem != NULL);
495 
496 #if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
497 	  /* Atomically increment NCREATED.  */
498 	  unsigned int ncreated = atomic_increment_val (&nptl_ncreated);
499 
500 	  /* We chose the offset for coloring by incrementing it for
501 	     every new thread by a fixed amount.  The offset is used
502 	     modulo the page size.  Even if coloring would be better
503 	     relative to higher alignment values it makes no sense to
504 	     do it since the mmap() interface does not allow us to
505 	     specify any alignment for the returned memory block.  */
506 	  size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;
507 
508 	  /* Make sure the coloring offset does not disturb the alignment
509 	     of the TCB and static TLS block.  */
510 	  if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
511 	    coloring = (((coloring + __static_tls_align_m1)
512 			 & ~(__static_tls_align_m1))
513 			& ~pagesize_m1);
514 #else
515 	  /* Unless specified we do not make any adjustments.  */
516 # define coloring 0
517 #endif
518 
519 	  /* Place the thread descriptor at the end of the stack.  */
520 #if defined(TLS_TCB_AT_TP)
521 	  pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
522 #elif defined(TLS_DTV_AT_TP)
523 	  pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
524 				    - __static_tls_size)
525 				    & ~__static_tls_align_m1)
526 				   - TLS_PRE_TCB_SIZE);
527 #endif
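
	  /* Rough layout of the freshly mmap'd block on a
	     _STACK_GROWS_DOWN machine (addresses increase to the
	     right):

	       mem                                        mem + size
	        | guard (set up below) | usable stack | TLS | descriptor |

	     On TLS_DTV_AT_TP targets the descriptor sits just below the
	     static TLS block rather than above it, but either way both
	     live at the top of the block.  */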
528 
529 	  /* Remember the stack-related values.  */
530 	  pd->stackblock = mem;
531 	  pd->stackblock_size = size;
532 
533 	  /* We allocated the first block of the thread-specific data array.
534 	     This address will not change for the lifetime of this
535 	     descriptor.  */
536 	  pd->specific[0] = pd->specific_1stblock;
537 
538 	  /* This is at least the second thread.  */
539 	  pd->header.multiple_threads = 1;
540 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
541 	  __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
542 #endif
543 
544 #ifndef __ASSUME_PRIVATE_FUTEX
545 	  /* The thread must know when private futexes are supported.  */
546 	  pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
547                                                     header.private_futex);
548 #endif
549 
550 #ifdef NEED_DL_SYSINFO
551 	  /* Copy the sysinfo value from the parent.  */
552 	  THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
553 #endif
554 
555 	  /* The process ID is also the same as that of the caller.  */
556 	  pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
557 
558 	  /* Allocate the DTV for this thread.  */
559 	  if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
560 	    {
561 	      /* Something went wrong.  */
562 	      assert (errno == ENOMEM);
563 
564 	      /* Free the stack memory we just allocated.  */
565 	      (void) munmap (mem, size);
566 
567 	      return EAGAIN;
568 	    }
569 
570 
571 	  /* Prepare to modify global data.  */
572 	  lll_lock (stack_cache_lock, LLL_PRIVATE);
573 
574 	  /* And add to the list of stacks in use.  */
575 	  stack_list_add (&pd->list, &stack_used);
576 
577 	  lll_unlock (stack_cache_lock, LLL_PRIVATE);
578 
579 
580 	  /* Note that all of the stack and the thread descriptor is
581 	     zeroed.  This means we do not have to initialize fields
582 	     with initial value zero.  This is specifically true for
583 	     the 'tid' field which is always set back to zero once the
584 	     stack is not used anymore and for the 'guardsize' field
585 	     which will be read next.  */
586 	}
587 
588       /* Create or resize the guard area if necessary.  */
589       if (__builtin_expect (guardsize > pd->guardsize, 0))
590 	{
591 #ifdef NEED_SEPARATE_REGISTER_STACK
592 	  char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
593 #elif defined _STACK_GROWS_DOWN
594 	  char *guard = mem;
595 #elif defined _STACK_GROWS_UP
596 	  char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
597 #endif
598 	  if (mprotect (guard, guardsize, PROT_NONE) != 0)
599 	    {
600 	      int err;
601 	    mprot_error:
602 	      err = errno;
603 
604 	      lll_lock (stack_cache_lock, LLL_PRIVATE);
605 
606 	      /* Remove the thread from the list.  */
607 	      stack_list_del (&pd->list);
608 
609 	      lll_unlock (stack_cache_lock, LLL_PRIVATE);
610 
611 	      /* Get rid of the TLS block we allocated.  */
612 	      _dl_deallocate_tls (TLS_TPADJ (pd), false);
613 
614 	      /* Free the stack memory regardless of whether the size
615 		 of the cache is over the limit or not.  If this piece
616 		 of memory caused problems we had better not use it
617 		 anymore.  And we ignore possible errors; there
618 		 is nothing we could do.  */
619 	      (void) munmap (mem, size);
620 
621 	      return err;
622 	    }
623 
624 	  pd->guardsize = guardsize;
625 	}
626       else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
627 				 0))
628 	{
629 	  /* The old guard area is too large.  */
630 
631 #ifdef NEED_SEPARATE_REGISTER_STACK
632 	  char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
633 	  char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);
634 
635 	  if (oldguard < guard
636 	      && mprotect (oldguard, guard - oldguard, prot) != 0)
637 	    goto mprot_error;
638 
639 	  if (mprotect (guard + guardsize,
640 			oldguard + pd->guardsize - guard - guardsize,
641 			prot) != 0)
642 	    goto mprot_error;
643 #elif defined _STACK_GROWS_DOWN
644 	  if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
645 			prot) != 0)
646 	    goto mprot_error;
647 #elif defined _STACK_GROWS_UP
648 	  if (mprotect ((char *) (((uintptr_t) pd - pd->guardsize) & ~pagesize_m1),
649 			pd->guardsize - guardsize, prot) != 0)
650 	    goto mprot_error;
651 #endif
652 
653 	  pd->guardsize = guardsize;
654 	}
655       /* pthread_getattr_np() needs to report the guard size requested
656 	 in the attribute, regardless of how large the guard actually
657 	 in use is.  */
658       pd->reported_guardsize = guardsize;
659     }
660 
661   /* Initialize the lock.  We have to do this unconditionally since the
662      stillborn thread could be canceled while the lock is taken.  */
663   pd->lock = LLL_LOCK_INITIALIZER;
664 
665   /* The robust mutex lists also need to be initialized
666      unconditionally because the cleanup for the previous stack owner
667      might have happened in the kernel.  */
668   pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
669 				  - offsetof (pthread_mutex_t,
670 					      __data.__list.__next));
671   pd->robust_head.list_op_pending = NULL;
672 #ifdef __PTHREAD_MUTEX_HAVE_PREV
673   pd->robust_prev = &pd->robust_head;
674 #endif
675   pd->robust_head.list = &pd->robust_head;
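
  /* FUTEX_OFFSET tells the kernel's robust-futex machinery how far the
     lock word (__data.__lock) lies from the list member
     (__data.__list.__next) that is actually linked into the robust
     list; the kernel walks this list at thread exit and needs the
     offset to find each lock word.  LIST pointing at ROBUST_HEAD
     itself is the empty-list representation.  */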
676 
677   /* We place the thread descriptor at the end of the stack.  */
678   *pdp = pd;
679 
680 #if defined(TLS_TCB_AT_TP)
681   /* The stack begins before the TCB and the static TLS block.  */
682   stacktop = ((char *) (pd + 1) - __static_tls_size);
683 #elif defined(TLS_DTV_AT_TP)
684   stacktop = (char *) (pd - 1);
685 #endif
686 
687 #ifdef NEED_SEPARATE_REGISTER_STACK
688   *stack = pd->stackblock;
689   *stacksize = stacktop - *stack;
690 #elif defined _STACK_GROWS_DOWN
691   *stack = stacktop;
692 #elif defined _STACK_GROWS_UP
693   *stack = pd->stackblock;
694   assert (*stack > 0);
695 #endif
696 
697   return 0;
698 }
699 
700 
701 void
702 internal_function
703 __deallocate_stack (struct pthread *pd)
704 {
705   lll_lock (stack_cache_lock, LLL_PRIVATE);
706 
707   /* Remove the thread from the list of threads with user defined
708      stacks.  */
709   stack_list_del (&pd->list);
710 
711   /* Not much to do.  Just free the mmap()ed memory.  Note that we do
712      not reset the 'used' flag in the 'tid' field.  This is done by
713      the kernel.  If no thread has been created yet this field is
714      still zero.  */
715   if (__builtin_expect (! pd->user_stack, 1))
716     (void) queue_stack (pd);
717   else
718     /* Free the memory associated with the ELF TLS.  */
719     _dl_deallocate_tls (TLS_TPADJ (pd), false);
720 
721   lll_unlock (stack_cache_lock, LLL_PRIVATE);
722 }
723 
724 
725 int
726 internal_function
727 __make_stacks_executable (void **stack_endp)
728 {
729   /* First the main thread's stack.  */
730   int err = EPERM;
731   if (err != 0)
732     return err;
733 
734 #ifdef NEED_SEPARATE_REGISTER_STACK
735   const size_t pagemask = ~(__getpagesize () - 1);
736 #endif
737 
738   lll_lock (stack_cache_lock, LLL_PRIVATE);
739 
740   list_t *runp;
741   list_for_each (runp, &stack_used)
742     {
743       err = change_stack_perm (list_entry (runp, struct pthread, list)
744 #ifdef NEED_SEPARATE_REGISTER_STACK
745 			       , pagemask
746 #endif
747 			       );
748       if (err != 0)
749 	break;
750     }
751 
752   /* Also change the permission for the currently unused stacks.  This
753      might be wasted time but better spend it here than adding a check
754      in the fast path.  */
755   if (err == 0)
756     list_for_each (runp, &stack_cache)
757       {
758 	err = change_stack_perm (list_entry (runp, struct pthread, list)
759 #ifdef NEED_SEPARATE_REGISTER_STACK
760 				 , pagemask
761 #endif
762 				 );
763 	if (err != 0)
764 	  break;
765       }
766 
767   lll_unlock (stack_cache_lock, LLL_PRIVATE);
768 
769   return err;
770 }
771 
772 
773 /* In case of a fork() call the memory allocation in the child will be
774    the same but only one thread is running.  All stacks except that of
775    the one running thread are not used anymore.  We have to recycle
776    them.  */
777 void
778 __reclaim_stacks (void)
779 {
780   struct pthread *self = (struct pthread *) THREAD_SELF;
781 
782   /* No locking necessary.  The caller is the only stack in use.  But
783      we have to be aware that we might have interrupted a list
784      operation.  */
785 
786   if (in_flight_stack != 0)
787     {
788       bool add_p = in_flight_stack & 1;
789       list_t *elem = (list_t *) (in_flight_stack & ~UINTMAX_C (1));
790 
791       if (add_p)
792 	{
793 	  /* We always add at the beginning of the list.  So in this
794 	     case we only need to check the beginning of these lists.  */
795 	  int check_list (list_t *l)
796 	  {
797 	    if (l->next->prev != l)
798 	      {
799 		assert (l->next->prev == elem);
800 
801 		elem->next = l->next;
802 		elem->prev = l;
803 		l->next = elem;
804 
805 		return 1;
806 	      }
807 
808 	    return 0;
809 	  }
810 
811 	  if (check_list (&stack_used) == 0)
812 	    (void) check_list (&stack_cache);
813 	}
814       else
815 	{
816 	  /* We can simply always replay the delete operation.  */
817 	  elem->next->prev = elem->prev;
818 	  elem->prev->next = elem->next;
819 	}
820     }
821 
822   /* Mark all stacks except the still running one as free.  */
823   list_t *runp;
824   list_for_each (runp, &stack_used)
825     {
826       struct pthread *curp = list_entry (runp, struct pthread, list);
827       if (curp != self)
828 	{
829 	  /* This marks the stack as free.  */
830 	  curp->tid = 0;
831 
832 	  /* The PID field must be initialized for the new process.  */
833 	  curp->pid = self->pid;
834 
835 	  /* Account for the size of the stack.  */
836 	  stack_cache_actsize += curp->stackblock_size;
837 
838 	  if (curp->specific_used)
839 	    {
840 	      /* Clear the thread-specific data.  */
841 	      memset (curp->specific_1stblock, '\0',
842 		      sizeof (curp->specific_1stblock));
843 
844 	      curp->specific_used = false;
845 
846 	      size_t cnt;
847 	      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
848 		if (curp->specific[cnt] != NULL)
849 		  {
850 		    memset (curp->specific[cnt], '\0',
851 			    sizeof (curp->specific_1stblock));
852 
853 		    /* We have allocated the block, which we do not
854 		       free here, so re-set the bit.  */
855 		    curp->specific_used = true;
856 		  }
857 	    }
858 	}
859     }
860 
861   /* Reset the PIDs in any cached stacks.  */
862   list_for_each (runp, &stack_cache)
863     {
864       struct pthread *curp = list_entry (runp, struct pthread, list);
865       curp->pid = self->pid;
866     }
867 
868   /* Add the stacks of all formerly running threads to the cache.  */
869   list_splice (&stack_used, &stack_cache);
870 
871   /* Remove the entry for the current thread from the cache list
872      and add it to the list of running threads.  Which of the two
873      lists is decided by the user_stack flag.  */
874   stack_list_del (&self->list);
875 
876   /* Re-initialize the lists for all the threads.  */
877   INIT_LIST_HEAD (&stack_used);
878   INIT_LIST_HEAD (&__stack_user);
879 
880   if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
881     list_add (&self->list, &__stack_user);
882   else
883     list_add (&self->list, &stack_used);
884 
885   /* There is one thread running.  */
886   __nptl_nthreads = 1;
887 
888   in_flight_stack = 0;
889 
890   /* Initialize the lock.  */
891   stack_cache_lock = LLL_LOCK_INITIALIZER;
892 }
893 
894 
895 #if HP_TIMING_AVAIL
896 # undef __find_thread_by_id
897 /* Find a thread given the thread ID.  */
898 attribute_hidden
899 struct pthread *
900 __find_thread_by_id (pid_t tid)
901 {
902   struct pthread *result = NULL;
903 
904   lll_lock (stack_cache_lock, LLL_PRIVATE);
905 
906   /* Iterate over the list with system-allocated threads first.  */
907   list_t *runp;
908   list_for_each (runp, &stack_used)
909     {
910       struct pthread *curp;
911 
912       curp = list_entry (runp, struct pthread, list);
913 
914       if (curp->tid == tid)
915 	{
916 	  result = curp;
917 	  goto out;
918 	}
919     }
920 
921   /* Now the list with threads using user-allocated stacks.  */
922   list_for_each (runp, &__stack_user)
923     {
924       struct pthread *curp;
925 
926       curp = list_entry (runp, struct pthread, list);
927 
928       if (curp->tid == tid)
929 	{
930 	  result = curp;
931 	  goto out;
932 	}
933     }
934 
935  out:
936   lll_unlock (stack_cache_lock, LLL_PRIVATE);
937 
938   return result;
939 }
940 #endif
941 
942 
943 static void
944 internal_function
945 setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
946 {
947   int ch;
948 
949   /* Don't let the thread exit before the setxid handler runs.  */
950   t->setxid_futex = 0;
951 
952   do
953     {
954       ch = t->cancelhandling;
955 
956       /* If the thread is exiting right now, ignore it.  */
957       if ((ch & EXITING_BITMASK) != 0)
958 	return;
959     }
960   while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
961 					       ch | SETXID_BITMASK, ch));
962 }
963 
964 
965 static void
966 internal_function
967 setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
968 {
969   int ch;
970 
971   do
972     {
973       ch = t->cancelhandling;
974       if ((ch & SETXID_BITMASK) == 0)
975 	return;
976     }
977   while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
978 					       ch & ~SETXID_BITMASK, ch));
979 
980   /* Release the futex just in case.  */
981   t->setxid_futex = 1;
982   lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
983 }
984 
985 
986 static int
987 internal_function
988 setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
989 {
990   if ((t->cancelhandling & SETXID_BITMASK) == 0)
991     return 0;
992 
993   int val;
994   INTERNAL_SYSCALL_DECL (err);
995 #if defined (__ASSUME_TGKILL) && __ASSUME_TGKILL
996   val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
997 			  t->tid, SIGSETXID);
998 #else
999 # ifdef __NR_tgkill
1000   val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
1001 			  t->tid, SIGSETXID);
1002   if (INTERNAL_SYSCALL_ERROR_P (val, err)
1003       && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
1004 # endif
1005     val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
1006 #endif
1007 
1008   /* If this failed, the thread must not have started yet or has already exited.  */
1009   if (!INTERNAL_SYSCALL_ERROR_P (val, err))
1010     {
1011       atomic_increment (&cmdp->cntr);
1012       return 1;
1013     }
1014   else
1015     return 0;
1016 }
1017 
1018 
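/* Propagate a set*id() call to every thread of the process.  Rough
   protocol as implemented below: mark every other thread with
   SETXID_BITMASK, send each one SIGSETXID (the signal handler,
   installed elsewhere in NPTL, repeats the syscall described by *CMDP
   and decrements CMDP->cntr), wait on the CNTR futex until all the
   handlers have run, and only then issue the syscall in the calling
   thread, which until that point still has the credentials needed to
   signal the others.  */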
1019 int
1020 attribute_hidden
1021 __nptl_setxid (struct xid_command *cmdp)
1022 {
1023   int signalled;
1024   int result;
1025   lll_lock (stack_cache_lock, LLL_PRIVATE);
1026 
1027   __xidcmd = cmdp;
1028   cmdp->cntr = 0;
1029 
1030   struct pthread *self = THREAD_SELF;
1031 
1032   /* Iterate over the list with system-allocated threads first.  */
1033   list_t *runp;
1034   list_for_each (runp, &stack_used)
1035     {
1036       struct pthread *t = list_entry (runp, struct pthread, list);
1037       if (t == self)
1038 	continue;
1039 
1040       setxid_mark_thread (cmdp, t);
1041     }
1042 
1043   /* Now the list with threads using user-allocated stacks.  */
1044   list_for_each (runp, &__stack_user)
1045     {
1046       struct pthread *t = list_entry (runp, struct pthread, list);
1047       if (t == self)
1048 	continue;
1049 
1050       setxid_mark_thread (cmdp, t);
1051     }
1052 
1053   /* Iterate until we don't succeed in signalling anyone.  That means
1054      we have gotten all running threads, and their children will be
1055      automatically correct once started.  */
1056   do
1057     {
1058       signalled = 0;
1059 
1060       list_for_each (runp, &stack_used)
1061 	{
1062 	  struct pthread *t = list_entry (runp, struct pthread, list);
1063 	  if (t == self)
1064 	    continue;
1065 
1066 	  signalled += setxid_signal_thread (cmdp, t);
1067 	}
1068 
1069       list_for_each (runp, &__stack_user)
1070 	{
1071 	  struct pthread *t = list_entry (runp, struct pthread, list);
1072 	  if (t == self)
1073 	    continue;
1074 
1075 	  signalled += setxid_signal_thread (cmdp, t);
1076 	}
1077 
1078       int cur = cmdp->cntr;
1079       while (cur != 0)
1080 	{
1081 	  lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
1082 	  cur = cmdp->cntr;
1083 	}
1084     }
1085   while (signalled != 0);
1086 
1087   /* Clean up flags, so that no thread blocks during exit waiting
1088      for a signal which will never come.  */
1089   list_for_each (runp, &stack_used)
1090     {
1091       struct pthread *t = list_entry (runp, struct pthread, list);
1092       if (t == self)
1093 	continue;
1094 
1095       setxid_unmark_thread (cmdp, t);
1096     }
1097 
1098   list_for_each (runp, &__stack_user)
1099     {
1100       struct pthread *t = list_entry (runp, struct pthread, list);
1101       if (t == self)
1102 	continue;
1103 
1104       setxid_unmark_thread (cmdp, t);
1105     }
1106 
1107   /* This must be last, otherwise the current thread might not have
1108      permission to send the SIGSETXID signal to the other threads.  */
1109   INTERNAL_SYSCALL_DECL (err);
1110   result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
1111 				 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
1112   if (INTERNAL_SYSCALL_ERROR_P (result, err))
1113     {
1114       __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
1115       result = -1;
1116     }
1117 
1118   lll_unlock (stack_cache_lock, LLL_PRIVATE);
1119   return result;
1120 }
1121 
1122 static inline void __attribute__((always_inline))
1123 init_one_static_tls (struct pthread *curp, struct link_map *map)
1124 {
1125   dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
1126 # if defined(TLS_TCB_AT_TP)
1127   void *dest = (char *) curp - map->l_tls_offset;
1128 # elif defined(TLS_DTV_AT_TP)
1129   void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
1130 # else
1131 #  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
1132 # endif
1133 
1134   /* Fill in the DTV slot so that a later LD/GD access will find it.  */
1135   dtv[map->l_tls_modid].pointer.val = dest;
1136   dtv[map->l_tls_modid].pointer.is_static = true;
1137 
1138   /* Initialize the memory.  */
1139   memset (mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
1140 	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
1141 }
1142 
1143 void
1144 attribute_hidden
1145 __pthread_init_static_tls (struct link_map *map)
1146 {
1147   lll_lock (stack_cache_lock, LLL_PRIVATE);
1148 
1149   /* Iterate over the list with system-allocated threads first.  */
1150   list_t *runp;
1151   list_for_each (runp, &stack_used)
1152     init_one_static_tls (list_entry (runp, struct pthread, list), map);
1153 
1154   /* Now the list with threads using user-allocated stacks.  */
1155   list_for_each (runp, &__stack_user)
1156     init_one_static_tls (list_entry (runp, struct pthread, list), map);
1157 
1158   lll_unlock (stack_cache_lock, LLL_PRIVATE);
1159 }
1160 
1161 
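/* Wait until every other thread has left the dynamic linker's "global
   scope" critical section.  A thread announces a lookup in progress by
   setting its gscope_flag to THREAD_GSCOPE_FLAG_USED; we flip the flag
   to THREAD_GSCOPE_FLAG_WAIT so the thread knows to wake us through
   the futex once it is done.  The dynamic linker typically calls this
   before it modifies a search scope, e.g. during dlclose().  */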
1162 void
1163 attribute_hidden
1164 __wait_lookup_done (void)
1165 {
1166   lll_lock (stack_cache_lock, LLL_PRIVATE);
1167 
1168   struct pthread *self = THREAD_SELF;
1169 
1170   /* Iterate over the list with system-allocated threads first.  */
1171   list_t *runp;
1172   list_for_each (runp, &stack_used)
1173     {
1174       struct pthread *t = list_entry (runp, struct pthread, list);
1175       if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
1176 	continue;
1177 
1178       int *const gscope_flagp = &t->header.gscope_flag;
1179 
1180       /* We have to wait until this thread is done with the global
1181 	 scope.  First tell the thread that we are waiting and
1182 	 possibly have to be woken.  */
1183       if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
1184 						THREAD_GSCOPE_FLAG_WAIT,
1185 						THREAD_GSCOPE_FLAG_USED))
1186 	continue;
1187 
1188       do
1189 	lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
1190       while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
1191     }
1192 
1193   /* Now the list with threads using user-allocated stacks.  */
1194   list_for_each (runp, &__stack_user)
1195     {
1196       struct pthread *t = list_entry (runp, struct pthread, list);
1197       if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
1198 	continue;
1199 
1200       int *const gscope_flagp = &t->header.gscope_flag;
1201 
1202       /* We have to wait until this thread is done with the global
1203 	 scope.  First tell the thread that we are waiting and
1204 	 possibly have to be woken.  */
1205       if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
1206 						THREAD_GSCOPE_FLAG_WAIT,
1207 						THREAD_GSCOPE_FLAG_USED))
1208 	continue;
1209 
1210       do
1211 	lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
1212       while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
1213     }
1214 
1215   lll_unlock (stack_cache_lock, LLL_PRIVATE);
1216 }
1217