/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#if defined SHARED || defined NOT_IN_libc
# error in buildsystem: This file is for libc.a
#endif
#include <signal.h>
#include <stdlib.h>
#include <sys/param.h>
#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>
#include <dl-elf.h>
#include <dl-hash.h>

#include <assert.h>
#include <link.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

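/* In this static-libc build the ld.so-internal helper names used below
   map directly onto the regular libc functions.  */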
#define _dl_malloc malloc
#define _dl_memset memset
#define _dl_mempcpy mempcpy
#define _dl_dprintf fprintf
#define _dl_debug_file stderr
#define _dl_exit exit

/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
# define TLS_STATIC_SURPLUS (64 + DL_NNS * 100)


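/* The initial DTV, defined outside this file by the static TLS setup
   code; _dl_deallocate_tls below compares against it so that it is
   never passed to free.  */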
#ifndef SHARED
extern dtv_t static_dtv;
#endif

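/* Everything guarded by SHARED below is compiled out of this build;
   the #error at the top of the file guarantees SHARED is not defined
   here.  */
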
/* Out-of-memory handler.  */
# ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  do {
    _dl_dprintf (_dl_debug_file,
                 "cannot allocate thread-local memory: ABORT\n");
    _dl_exit (127);
  } while (1);
}
# endif


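/* Note that the alignment argument is ignored: the block is obtained
   with plain malloc, so only malloc's natural alignment is
   guaranteed.  */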
void *_dl_memalign(size_t alignment, size_t bytes);
void *_dl_memalign(size_t alignment, size_t bytes)
{
  return _dl_malloc(bytes);
}


/*
 * We are trying to perform a static TLS relocation in MAP, but it was
 * dynamically loaded.  This can only work if there is enough surplus in
 * the static TLS area already allocated for each running thread.  If this
 * object's TLS segment is too big to fit, we fail.  If it fits,
 * we set MAP->l_tls_offset and return.
 * This function intentionally does not return any value but signals error
 * directly, as static TLS should be rare and the code handling it should
 * not be inlined.
 */


void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  /* If the alignment requirements are too high fail.  */
  if (map->l_tls_align > _dl_tls_static_align)
    {
    fail:
      _dl_dprintf(_dl_debug_file, "cannot allocate memory in static TLS block");
      _dl_exit(30);
    }

# if defined(TLS_TCB_AT_TP)
  size_t freebytes;
  size_t n;
  size_t blsize;

  freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;

  blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  n = (freebytes - blsize) / map->l_tls_align;

  size_t offset = _dl_tls_static_used + (freebytes - n * map->l_tls_align
                                         - map->l_tls_firstbyte_offset);

  map->l_tls_offset = _dl_tls_static_used = offset;
# elif defined(TLS_DTV_AT_TP)
  size_t used;
  size_t check;

  size_t offset = roundup (_dl_tls_static_used, map->l_tls_align);
  used = offset + map->l_tls_blocksize;
  check = used;

  /* dl_tls_static_used includes the TCB at the beginning.  */
  if (check > _dl_tls_static_size)
    goto fail;

  map->l_tls_offset = offset;
  _dl_tls_static_used = used;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /*
   * If the object is not yet relocated we cannot initialize the
   * static TLS region.  Delay it.
   */
  if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE)
    {
#ifdef SHARED
      /*
       * Update the slot information data for at least the generation of
       * the DSO we are allocating data for.
       */
      if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
        (void) _dl_update_slotinfo (map->l_tls_modid);
#endif
      _dl_init_static_tls (map);
    }
  else
    map->l_need_tls_init = 1;
}

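/* Return the module ID (DTV index) to use for a newly loaded object,
   reusing a gap left by an unloaded module when one exists.  */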
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo list is not
         allocated yet when the function is called for the first time.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}


# ifdef SHARED
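/* Compute the static TLS layout at startup: assign an l_tls_offset to
   every initially loaded module and set dl_tls_static_size, _used and
   _align accordingly.  */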
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */
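  /* Illustrative example (arbitrary values): with p_align == 16 and
     p_vaddr % 16 == 4, l_tls_firstbyte_offset is 4 and the code below
     computes firstbyte = (-4) & (16 - 1) == 12, i.e. the block is
     placed 12 bytes before the next 16-byte boundary so that its first
     byte again satisfies addr % 16 == 4, as required above.  */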

# if defined(TLS_TCB_AT_TP)
  /* We simply start with zero.  */
  size_t offset = 0;

  size_t cnt;
  for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;
  size_t cnt;

  for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}


/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
                  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
# endif

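/* Allocate the DTV for the thread whose TCB pointer is RESULT and
   install it there; return RESULT, or NULL if the allocation failed.  */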
static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}


/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}


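/* Allocate the static TLS block (including room for the TCB) plus the
   DTV for a new thread; return a pointer to the TCB within the block,
   or NULL on failure.  */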
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

# if defined(TLS_DTV_AT_TP)
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
          & ~(GL(dl_tls_static_align) - 1);
# endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = _dl_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

# if defined(TLS_TCB_AT_TP)
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
         libpthread) to do it, because we will initialize the DTV et al.  */
      _dl_memset (result, '\0', TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
         We can't ask the caller (i.e. libpthread) to do it, because we will
         initialize the DTV et al.  */
      _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
                  TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
# endif

      result = allocate_dtv (result);
      if (result == NULL)
        free (allocated);
    }

  return result;
}


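/* Initialize the DTV and the static TLS blocks of the thread whose
   storage is RESULT from the TLS initialization images of all
   initially loaded modules.  */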
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          if (map->l_tls_offset == NO_TLS_OFFSET)
            {
              /* For dynamically loaded modules we simply store
                 the value indicating deferred allocation.  */
              dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[map->l_tls_modid].pointer.is_static = false;
              continue;
            }

          assert (map->l_tls_modid == cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# if defined(TLS_TCB_AT_TP)
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
          dest = (char *) result + map->l_tls_offset;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

          /* Copy the initialization image and clear the BSS part.  */
          dtv[map->l_tls_modid].pointer.val = dest;
          dtv[map->l_tls_modid].pointer.is_static = true;
          _dl_memset (_dl_mempcpy (dest, map->l_tls_initimage,
                                   map->l_tls_initimage_size), '\0',
                      map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}

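/* Allocate storage (unless MEM is a preallocated block), set up the
   DTV and fill in the static TLS images for a new thread.  */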
void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
                                ? _dl_allocate_tls_storage ()
                                : allocate_dtv (mem));
}


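/* Free the dynamically allocated TLS blocks and the DTV of the thread
   whose TCB is TCB; free the static TLS block itself only if
   DEALLOC_TCB is set.  */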
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);
  size_t cnt;

  /* We need to free the memory allocated for non-static TLS.  */
  for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  if (dtv != GL(dl_initial_dtv))
#else
  if ((dtv - 1) != &static_dtv)
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
# if defined(TLS_TCB_AT_TP)
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
# elif defined(TLS_DTV_AT_TP)
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
             & ~(GL(dl_tls_static_align) - 1);
# endif
      free (tcb);
    }
}


# ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif


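/* Allocate the dynamic TLS block for MAP and initialize it from the
   module's TLS initialization image.  */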
static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  _dl_memset (_dl_mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
              '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}


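/* Bring the calling thread's DTV up to date with the global slotinfo
   list.  Return the link map for REQ_MODID if its DTV entry was reset
   during the update, otherwise NULL.  */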
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know
     that dynamic loading for this module is completed and this is
     the last load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          size_t cnt;

          for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  /* If this modid was used at some point the memory
                     might still be allocated.  */
                  if (! dtv[total + cnt].pointer.is_static
                      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
                    {
                      free (dtv[total + cnt].pointer.val);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Reallocate the dtv.  */
                  dtv_t *newp;
                  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
                  size_t oldsize = dtv[-1].counter;

                  assert (map->l_tls_modid <= newsize);

                  if (dtv == GL(dl_initial_dtv))
                    {
                      /* This is the initial dtv that was allocated
                         during rtld startup using the dl-minimal.c
                         malloc instead of the real malloc.  We can't
                         free it, we have to abandon the old storage.  */

                      newp = malloc ((2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                      /* Copy the whole old dtv, including the length
                         and generation counter entries at the front.  */
                      _dl_memcpy (newp, &dtv[-1],
                                  (2 + oldsize) * sizeof (dtv_t));
                    }
                  else
                    {
                      newp = realloc (&dtv[-1],
                                      (2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                    }

                  newp[0].counter = newsize;

                  /* Clear the newly allocated part.  */
                  _dl_memset (newp + 2 + oldsize, '\0',
                              (newsize - oldsize) * sizeof (dtv_t));

                  /* Point dtv to the generation counter.  */
                  dtv = &newp[1];

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocated for this
                 dtv entry, free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              if (! dtv[modid].pointer.is_static
                  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
                /* Note that free is called for NULL as well.  We
                   deallocate even if it is this dtv entry we are
                   supposed to load.  The reason is that we call
                   memalign and not malloc.  */
                free (dtv[modid].pointer.val);

              /* This module is loaded dynamically.  We defer memory
                 allocation.  */
              dtv[modid].pointer.is_static = false;
              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}


/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    the_map = _dl_update_slotinfo (GET_ADDR_MODULE);

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    {
      /* The allocation was deferred.  Do it now.  */
      if (the_map == NULL)
        {
          /* Find the link map for this module.  */
          size_t idx = GET_ADDR_MODULE;
          struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

          while (idx >= listp->len)
            {
              idx -= listp->len;
              listp = listp->next;
            }

          the_map = listp->slotinfo[idx].map;
        }

      p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
      dtv[GET_ADDR_MODULE].pointer.is_static = false;
    }

  return (char *) p + GET_ADDR_OFFSET;
}
# endif



void _dl_add_to_slotinfo (struct link_map *l);
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;  /* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_dprintf (_dl_debug_file,
                       "cannot create TLS data structures: ABORT\n");
          _dl_exit (127);
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      _dl_memset (listp->slotinfo, '\0',
                  TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}