1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2019, Linaro Limited
4 * Copyright (c) 2020, Arm Limited
5 */
6
7 #include <assert.h>
8 #include <config.h>
9 #include <confine_array_index.h>
10 #include <ctype.h>
11 #include <elf32.h>
12 #include <elf64.h>
13 #include <elf_common.h>
14 #include <ldelf.h>
15 #include <link.h>
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string_ext.h>
19 #include <string.h>
20 #include <tee_api_types.h>
21 #include <tee_internal_api_extensions.h>
22 #include <unw/unwind.h>
23 #include <user_ta_header.h>
24 #include <util.h>
25
26 #include "sys.h"
27 #include "ta_elf.h"
28
29 /*
30 * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit
31 * TA
32 */
33 struct dl_phdr_info32 {
34 uint32_t dlpi_addr;
35 uint32_t dlpi_name;
36 uint32_t dlpi_phdr;
37 uint16_t dlpi_phnum;
38 uint64_t dlpi_adds;
39 uint64_t dlpi_subs;
40 uint32_t dlpi_tls_modid;
41 uint32_t dlpi_tls_data;
42 };
43
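/* Base address and size of the TA stack, saved for map dumps and stack traces */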
44 static vaddr_t ta_stack;
45 static vaddr_t ta_stack_size;
46
47 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
48
49 /*
50 * Main application is always ID 1, shared libraries with TLS take IDs 2 and
51 * above
52 */
53 static void assign_tls_mod_id(struct ta_elf *elf)
54 {
55 static size_t last_tls_mod_id = 1;
56
57 if (elf->is_main)
58 assert(last_tls_mod_id == 1); /* Main always comes first */
59 elf->tls_mod_id = last_tls_mod_id++;
60 }
61
62 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
63 {
64 struct ta_elf *elf = calloc(1, sizeof(*elf));
65
66 if (!elf)
67 return NULL;
68
69 TAILQ_INIT(&elf->segs);
70
71 elf->uuid = *uuid;
72 TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
73 return elf;
74 }
75
76 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
77 {
78 struct ta_elf *elf = ta_elf_find_elf(uuid);
79
80 if (elf)
81 return NULL;
82
83 elf = queue_elf_helper(uuid);
84 if (!elf)
85 err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
86
87 return elf;
88 }
89
90 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
91 {
92 struct ta_elf *elf = NULL;
93
94 TAILQ_FOREACH(elf, &main_elf_queue, link)
95 if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
96 return elf;
97
98 return NULL;
99 }
100
101 #if defined(ARM32) || defined(ARM64)
102 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
103 {
104 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
105 ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
106 ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
107 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
108 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
109 (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
110 #ifndef CFG_WITH_VFP
111 (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
112 #endif
113 ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
114 ehdr->e_shentsize != sizeof(Elf32_Shdr))
115 return TEE_ERROR_BAD_FORMAT;
116
117 elf->is_32bit = true;
118 elf->e_entry = ehdr->e_entry;
119 elf->e_phoff = ehdr->e_phoff;
120 elf->e_shoff = ehdr->e_shoff;
121 elf->e_phnum = ehdr->e_phnum;
122 elf->e_shnum = ehdr->e_shnum;
123 elf->e_phentsize = ehdr->e_phentsize;
124 elf->e_shentsize = ehdr->e_shentsize;
125
126 return TEE_SUCCESS;
127 }
128
129 #ifdef ARM64
130 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
131 {
132 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
133 ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
134 ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
135 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
136 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
137 ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
138 ehdr->e_shentsize != sizeof(Elf64_Shdr))
139 return TEE_ERROR_BAD_FORMAT;
140
141
142 elf->is_32bit = false;
143 elf->e_entry = ehdr->e_entry;
144 elf->e_phoff = ehdr->e_phoff;
145 elf->e_shoff = ehdr->e_shoff;
146 elf->e_phnum = ehdr->e_phnum;
147 elf->e_shnum = ehdr->e_shnum;
148 elf->e_phentsize = ehdr->e_phentsize;
149 elf->e_shentsize = ehdr->e_shentsize;
150
151 return TEE_SUCCESS;
152 }
153 #else /*ARM64*/
154 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
155 Elf64_Ehdr *ehdr __unused)
156 {
157 return TEE_ERROR_NOT_SUPPORTED;
158 }
159 #endif /*ARM64*/
160 #endif /* ARM32 || ARM64 */
161
162 #if defined(RV64)
163 static TEE_Result e32_parse_ehdr(struct ta_elf *elf __unused,
164 Elf32_Ehdr *ehdr __unused)
165 {
166 return TEE_ERROR_BAD_FORMAT;
167 }
168
169 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
170 {
171 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
172 ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
173 ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
174 ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
175 ehdr->e_type != ET_DYN || ehdr->e_machine != EM_RISCV ||
176 ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
177 ehdr->e_shentsize != sizeof(Elf64_Shdr))
178 return TEE_ERROR_BAD_FORMAT;
179
180 elf->is_32bit = false;
181 elf->e_entry = ehdr->e_entry;
182 elf->e_phoff = ehdr->e_phoff;
183 elf->e_shoff = ehdr->e_shoff;
184 elf->e_phnum = ehdr->e_phnum;
185 elf->e_shnum = ehdr->e_shnum;
186 elf->e_phentsize = ehdr->e_phentsize;
187 elf->e_shentsize = ehdr->e_shentsize;
188
189 return TEE_SUCCESS;
190 }
191 #endif /* RV64 */
192
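/*
 * Check that a program header of @type covering [addr, addr + memsz),
 * where @addr is relative to the load address, fits within the part of
 * the ELF image that has been mapped so far. Aborts via err() otherwise.
 */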
193 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
194 vaddr_t addr, size_t memsz)
195 {
196 vaddr_t max_addr = 0;
197
198 if (ADD_OVERFLOW(addr, memsz, &max_addr))
199 err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
200
201 /*
202 * elf->load_addr and elf->max_addr are both using the
203 * final virtual addresses, while this program header is
204 * relative to 0.
205 */
206 if (max_addr > elf->max_addr - elf->load_addr)
207 err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
208 type);
209 }
210
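/*
 * Read entry @idx of the dynamic array found at @addr (relative to the
 * load address), handling both Elf32_Dyn and Elf64_Dyn layouts, and
 * return its tag and value in @tag and @val.
 */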
211 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
212 size_t idx, unsigned int *tag, size_t *val)
213 {
214 if (elf->is_32bit) {
215 Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
216
217 *tag = dyn[idx].d_tag;
218 *val = dyn[idx].d_un.d_val;
219 } else {
220 Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
221
222 *tag = dyn[idx].d_tag;
223 *val = dyn[idx].d_un.d_val;
224 }
225 }
226
227 static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
228 size_t sz)
229 {
230 size_t max_addr = 0;
231
232 if ((vaddr_t)ptr < elf->load_addr)
233 err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);
234
235 if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
236 err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);
237
238 if (max_addr > elf->max_addr)
239 err(TEE_ERROR_BAD_FORMAT,
240 "%s %p..%#zx out of range", name, ptr, max_addr);
241 }
242
243 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
244 size_t num_chains)
245 {
246 /*
247 * Start from 2 since the first two words are mandatory and hold
248 * num_buckets and num_chains. This function is therefore called twice:
249 * first to check that there's indeed room for num_buckets and
250 * num_chains, then to check that all of it fits.
251 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
252 */
253 size_t num_words = 2;
254 size_t sz = 0;
255
256 if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
257 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);
258
259 if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
260 ADD_OVERFLOW(num_words, num_chains, &num_words) ||
261 MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
262 err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");
263
264 check_range(elf, "DT_HASH", ptr, sz);
265 }
266
267 static void check_gnu_hashtab(struct ta_elf *elf, void *ptr)
268 {
269 struct gnu_hashtab *h = ptr;
270 size_t num_words = 4; /* nbuckets, symoffset, bloom_size, bloom_shift */
271 size_t bloom_words = 0;
272 size_t sz = 0;
273
274 if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
275 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_GNU_HASH %p",
276 ptr);
277
278 if (elf->gnu_hashtab_size < sizeof(*h))
279 err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH too small");
280
281 /* Check validity of h->nbuckets and h->bloom_size */
282
283 if (elf->is_32bit)
284 bloom_words = h->bloom_size;
285 else
286 bloom_words = h->bloom_size * 2;
287 if (ADD_OVERFLOW(num_words, h->nbuckets, &num_words) ||
288 ADD_OVERFLOW(num_words, bloom_words, &num_words) ||
289 MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
290 sz > elf->gnu_hashtab_size)
291 err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH overflow");
292 }
293
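/*
 * Scan the section headers for SHT_HASH and SHT_GNU_HASH tables and
 * record their addresses (plus size for the GNU hash table) for later
 * symbol lookups, then sanity check whatever was found.
 */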
294 static void save_hashtab(struct ta_elf *elf)
295 {
296 uint32_t *hashtab = NULL;
297 size_t n = 0;
298
299 if (elf->is_32bit) {
300 Elf32_Shdr *shdr = elf->shdr;
301
302 for (n = 0; n < elf->e_shnum; n++) {
303 void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
304 elf->load_addr);
305
306 if (shdr[n].sh_type == SHT_HASH) {
307 elf->hashtab = addr;
308 } else if (shdr[n].sh_type == SHT_GNU_HASH) {
309 elf->gnu_hashtab = addr;
310 elf->gnu_hashtab_size = shdr[n].sh_size;
311 }
312 }
313 } else {
314 Elf64_Shdr *shdr = elf->shdr;
315
316 for (n = 0; n < elf->e_shnum; n++) {
317 void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
318 elf->load_addr);
319
320 if (shdr[n].sh_type == SHT_HASH) {
321 elf->hashtab = addr;
322 } else if (shdr[n].sh_type == SHT_GNU_HASH) {
323 elf->gnu_hashtab = addr;
324 elf->gnu_hashtab_size = shdr[n].sh_size;
325 }
326 }
327 }
328
329 if (elf->hashtab) {
330 check_hashtab(elf, elf->hashtab, 0, 0);
331 hashtab = elf->hashtab;
332 check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
333 }
334 if (elf->gnu_hashtab)
335 check_gnu_hashtab(elf, elf->gnu_hashtab);
336 }
337
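/*
 * If @type is PT_DYNAMIC, locate DT_STRTAB in the segment and use it to
 * resolve DT_SONAME, saving a pointer to this library's SONAME string.
 */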
338 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
339 vaddr_t addr, size_t memsz)
340 {
341 size_t dyn_entsize = 0;
342 size_t num_dyns = 0;
343 size_t n = 0;
344 unsigned int tag = 0;
345 size_t val = 0;
346 char *str_tab = NULL;
347
348 if (type != PT_DYNAMIC)
349 return;
350
351 if (elf->is_32bit)
352 dyn_entsize = sizeof(Elf32_Dyn);
353 else
354 dyn_entsize = sizeof(Elf64_Dyn);
355
356 assert(!(memsz % dyn_entsize));
357 num_dyns = memsz / dyn_entsize;
358
359 for (n = 0; n < num_dyns; n++) {
360 read_dyn(elf, addr, n, &tag, &val);
361 if (tag == DT_STRTAB) {
362 str_tab = (char *)(val + elf->load_addr);
363 break;
364 }
365 }
366 for (n = 0; n < num_dyns; n++) {
367 read_dyn(elf, addr, n, &tag, &val);
368 if (tag == DT_SONAME) {
369 elf->soname = str_tab + val;
370 break;
371 }
372 }
373 }
374
375 static void save_soname(struct ta_elf *elf)
376 {
377 size_t n = 0;
378
379 if (elf->is_32bit) {
380 Elf32_Phdr *phdr = elf->phdr;
381
382 for (n = 0; n < elf->e_phnum; n++)
383 save_soname_from_segment(elf, phdr[n].p_type,
384 phdr[n].p_vaddr,
385 phdr[n].p_memsz);
386 } else {
387 Elf64_Phdr *phdr = elf->phdr;
388
389 for (n = 0; n < elf->e_phnum; n++)
390 save_soname_from_segment(elf, phdr[n].p_type,
391 phdr[n].p_vaddr,
392 phdr[n].p_memsz);
393 }
394 }
395
396 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
397 {
398 Elf32_Shdr *shdr = elf->shdr;
399 size_t str_idx = shdr[tab_idx].sh_link;
400
401 elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
402 if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
403 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
404 elf->dynsymtab);
405 check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);
406
407 if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
408 err(TEE_ERROR_BAD_FORMAT,
409 "Size of dynsymtab not an even multiple of Elf32_Sym");
410 elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
411
412 if (str_idx >= elf->e_shnum)
413 err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
414 elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
415 check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);
416
417 elf->dynstr_size = shdr[str_idx].sh_size;
418 }
419
420 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
421 {
422 Elf64_Shdr *shdr = elf->shdr;
423 size_t str_idx = shdr[tab_idx].sh_link;
424
425 elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
426 elf->load_addr);
427
428 if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
429 err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
430 elf->dynsymtab);
431 check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
432 shdr[tab_idx].sh_size);
433
434 if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
435 err(TEE_ERROR_BAD_FORMAT,
436 "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
437 elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
438
439 if (str_idx >= elf->e_shnum)
440 err(TEE_ERROR_BAD_FORMAT,
441 ".dynstr/STRTAB section index out of range");
442 elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
443 check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);
444
445 elf->dynstr_size = shdr[str_idx].sh_size;
446 }
447
448 static void save_symtab(struct ta_elf *elf)
449 {
450 size_t n = 0;
451
452 if (elf->is_32bit) {
453 Elf32_Shdr *shdr = elf->shdr;
454
455 for (n = 0; n < elf->e_shnum; n++) {
456 if (shdr[n].sh_type == SHT_DYNSYM) {
457 e32_save_symtab(elf, n);
458 break;
459 }
460 }
461 } else {
462 Elf64_Shdr *shdr = elf->shdr;
463
464 for (n = 0; n < elf->e_shnum; n++) {
465 if (shdr[n].sh_type == SHT_DYNSYM) {
466 e64_save_symtab(elf, n);
467 break;
468 }
469 }
470
471 }
472
473 save_hashtab(elf);
474 save_soname(elf);
475 }
476
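/*
 * Open the TA binary, map its first page and parse the ELF header,
 * trying the 32-bit layout first and falling back to the 64-bit one.
 * The program headers are required to fit within that first page.
 */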
477 static void init_elf(struct ta_elf *elf)
478 {
479 TEE_Result res = TEE_SUCCESS;
480 vaddr_t va = 0;
481 uint32_t flags = LDELF_MAP_FLAG_SHAREABLE;
482 size_t sz = 0;
483
484 res = sys_open_ta_bin(&elf->uuid, &elf->handle);
485 if (res)
486 err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
487
488 /*
489 * Map it read-only executable when we're loading a library where
490 * the ELF header is included in a load segment.
491 */
492 if (!elf->is_main)
493 flags |= LDELF_MAP_FLAG_EXECUTABLE;
494 res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
495 if (res)
496 err(res, "sys_map_ta_bin");
497 elf->ehdr_addr = va;
498 if (!elf->is_main) {
499 elf->load_addr = va;
500 elf->max_addr = va + SMALL_PAGE_SIZE;
501 elf->max_offs = SMALL_PAGE_SIZE;
502 }
503
504 if (!IS_ELF(*(Elf32_Ehdr *)va))
505 err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
506
507 res = e32_parse_ehdr(elf, (void *)va);
508 if (res == TEE_ERROR_BAD_FORMAT)
509 res = e64_parse_ehdr(elf, (void *)va);
510 if (res)
511 err(res, "Cannot parse ELF");
512
513 if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
514 ADD_OVERFLOW(sz, elf->e_phoff, &sz))
515 err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");
516
517 if (sz > SMALL_PAGE_SIZE)
518 err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
519
520 elf->phdr = (void *)(va + elf->e_phoff);
521 }
522
523 static size_t roundup(size_t v)
524 {
525 return ROUNDUP(v, SMALL_PAGE_SIZE);
526 }
527
528 static size_t rounddown(size_t v)
529 {
530 return ROUNDDOWN(v, SMALL_PAGE_SIZE);
531 }
532
533 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
534 size_t filesz, size_t memsz, size_t flags, size_t align)
535 {
536 struct segment *seg = calloc(1, sizeof(*seg));
537
538 if (!seg)
539 err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
540
541 if (memsz < filesz)
542 err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");
543
544 seg->offset = offset;
545 seg->vaddr = vaddr;
546 seg->filesz = filesz;
547 seg->memsz = memsz;
548 seg->flags = flags;
549 seg->align = align;
550
551 TAILQ_INSERT_TAIL(&elf->segs, seg, link);
552 }
553
554 static void parse_load_segments(struct ta_elf *elf)
555 {
556 size_t n = 0;
557
558 if (elf->is_32bit) {
559 Elf32_Phdr *phdr = elf->phdr;
560
561 for (n = 0; n < elf->e_phnum; n++)
562 if (phdr[n].p_type == PT_LOAD) {
563 add_segment(elf, phdr[n].p_offset,
564 phdr[n].p_vaddr, phdr[n].p_filesz,
565 phdr[n].p_memsz, phdr[n].p_flags,
566 phdr[n].p_align);
567 } else if (phdr[n].p_type == PT_ARM_EXIDX) {
568 elf->exidx_start = phdr[n].p_vaddr;
569 elf->exidx_size = phdr[n].p_filesz;
570 } else if (phdr[n].p_type == PT_TLS) {
571 assign_tls_mod_id(elf);
572 }
573 } else {
574 Elf64_Phdr *phdr = elf->phdr;
575
576 for (n = 0; n < elf->e_phnum; n++)
577 if (phdr[n].p_type == PT_LOAD) {
578 add_segment(elf, phdr[n].p_offset,
579 phdr[n].p_vaddr, phdr[n].p_filesz,
580 phdr[n].p_memsz, phdr[n].p_flags,
581 phdr[n].p_align);
582 } else if (phdr[n].p_type == PT_TLS) {
583 elf->tls_start = phdr[n].p_vaddr;
584 elf->tls_filesz = phdr[n].p_filesz;
585 elf->tls_memsz = phdr[n].p_memsz;
586 } else if (IS_ENABLED(CFG_TA_BTI) &&
587 phdr[n].p_type == PT_GNU_PROPERTY) {
588 elf->prop_start = phdr[n].p_vaddr;
589 elf->prop_align = phdr[n].p_align;
590 elf->prop_memsz = phdr[n].p_memsz;
591 }
592 }
593 }
594
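/*
 * Fill the writeable mapping of @seg: bytes already available in the
 * previously mapped area are copied directly, the rest is read from the
 * TA binary.
 */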
595 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
596 {
597 uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
598 size_t n = 0;
599 size_t offs = seg->offset;
600 size_t num_bytes = seg->filesz;
601
602 if (offs < elf->max_offs) {
603 n = MIN(elf->max_offs - offs, num_bytes);
604 memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
605 dst += n;
606 offs += n;
607 num_bytes -= n;
608 }
609
610 if (num_bytes) {
611 TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
612 elf->handle, offs);
613
614 if (res)
615 err(res, "sys_copy_from_ta_bin");
616 elf->max_offs += offs;
617 }
618 }
619
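/*
 * Merge and page-align the load segments so that they can be mapped with
 * SMALL_PAGE_SIZE granularity. A segment whose virtual address range
 * shares a page with the previous segment is merged into it (case 1
 * below), a segment that only shares a file page is marked for remapping
 * as writeable (case 2 below), and the remaining segments are simply
 * rounded down to a page boundary. As an illustrative example with 4 KiB
 * pages, a read-only segment ending at vaddr 0x1f80 followed by a
 * read-write segment starting at vaddr 0x1fc0 is merged into a single
 * read-write segment.
 */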
620 static void adjust_segments(struct ta_elf *elf)
621 {
622 struct segment *seg = NULL;
623 struct segment *prev_seg = NULL;
624 size_t prev_end_addr = 0;
625 size_t align = 0;
626 size_t mask = 0;
627
628 /* Sanity check */
629 TAILQ_FOREACH(seg, &elf->segs, link) {
630 size_t dummy __maybe_unused = 0;
631
632 assert(seg->align >= SMALL_PAGE_SIZE);
633 assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
634 assert(seg->filesz <= seg->memsz);
635 assert((seg->offset & SMALL_PAGE_MASK) ==
636 (seg->vaddr & SMALL_PAGE_MASK));
637
638 prev_seg = TAILQ_PREV(seg, segment_head, link);
639 if (prev_seg) {
640 assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
641 assert(seg->offset >=
642 prev_seg->offset + prev_seg->filesz);
643 }
644 if (!align)
645 align = seg->align;
646 assert(align == seg->align);
647 }
648
649 mask = align - 1;
650
651 seg = TAILQ_FIRST(&elf->segs);
652 if (seg)
653 seg = TAILQ_NEXT(seg, link);
654 while (seg) {
655 prev_seg = TAILQ_PREV(seg, segment_head, link);
656 prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
657
658 /*
659 * This segment may overlap with the last "page" in the
660 * previous segment in two different ways:
661 * 1. Virtual address (and offset) overlaps =>
662 * Permissions need to be merged. The offset must have
663 * the same SMALL_PAGE_MASK bits as the vaddr, and offset and
664 * vaddr must line up with the previous segment.
665 *
666 * 2. Only offset overlaps =>
667 * The same page in the ELF is mapped at two different
668 * virtual addresses. As a limitation this segment must
669 * be mapped as writeable.
670 */
671
672 /* Case 1. */
673 if (rounddown(seg->vaddr) < prev_end_addr) {
674 assert((seg->vaddr & mask) == (seg->offset & mask));
675 assert(prev_seg->memsz == prev_seg->filesz);
676
677 /*
678 * Merge the segments and their permissions.
679 * Note that there may be a small hole between the
680 * two segments.
681 */
682 prev_seg->filesz = seg->vaddr + seg->filesz -
683 prev_seg->vaddr;
684 prev_seg->memsz = seg->vaddr + seg->memsz -
685 prev_seg->vaddr;
686 prev_seg->flags |= seg->flags;
687
688 TAILQ_REMOVE(&elf->segs, seg, link);
689 free(seg);
690 seg = TAILQ_NEXT(prev_seg, link);
691 continue;
692 }
693
694 /* Case 2. */
695 if ((seg->offset & mask) &&
696 rounddown(seg->offset) <
697 (prev_seg->offset + prev_seg->filesz)) {
698
699 assert(seg->flags & PF_W);
700 seg->remapped_writeable = true;
701 }
702
703 /*
704 * No overlap, but we may need to align address, offset and
705 * size.
706 */
707 seg->filesz += seg->vaddr - rounddown(seg->vaddr);
708 seg->memsz += seg->vaddr - rounddown(seg->vaddr);
709 seg->vaddr = rounddown(seg->vaddr);
710 seg->offset = rounddown(seg->offset);
711 seg = TAILQ_NEXT(seg, link);
712 }
713
714 }
715
716 static void populate_segments_legacy(struct ta_elf *elf)
717 {
718 TEE_Result res = TEE_SUCCESS;
719 struct segment *seg = NULL;
720 vaddr_t va = 0;
721
722 assert(elf->is_legacy);
723 TAILQ_FOREACH(seg, &elf->segs, link) {
724 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
725 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
726 seg->vaddr - seg->memsz);
727 size_t num_bytes = roundup(seg->memsz);
728
729 if (!elf->load_addr)
730 va = 0;
731 else
732 va = seg->vaddr + elf->load_addr;
733
734
735 if (!(seg->flags & PF_R))
736 err(TEE_ERROR_NOT_SUPPORTED,
737 "Segment must be readable");
738
739 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
740 if (res)
741 err(res, "sys_map_zi");
742 res = sys_copy_from_ta_bin((void *)va, seg->filesz,
743 elf->handle, seg->offset);
744 if (res)
745 err(res, "sys_copy_from_ta_bin");
746
747 if (!elf->load_addr)
748 elf->load_addr = va;
749 elf->max_addr = va + num_bytes;
750 elf->max_offs = seg->offset + seg->filesz;
751 }
752 }
753
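/*
 * Return the amount of padding, in bytes, to reserve in front of the
 * first mapping of an ELF. With CFG_TA_ASLR this is a random number of
 * pages in [CFG_TA_ASLR_MIN_OFFSET_PAGES, CFG_TA_ASLR_MAX_OFFSET_PAGES),
 * falling back to the minimum if no random data is available; without
 * ASLR it is always 0. For example, with a minimum of 0 and a maximum of
 * 128 pages, the load address may be shifted by up to 127 pages.
 */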
754 static size_t get_pad_begin(void)
755 {
756 #ifdef CFG_TA_ASLR
757 size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
758 size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
759 TEE_Result res = TEE_SUCCESS;
760 uint32_t rnd32 = 0;
761 size_t rnd = 0;
762
763 COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
764 CFG_TA_ASLR_MAX_OFFSET_PAGES);
765 if (max > min) {
766 res = sys_gen_random_num(&rnd32, sizeof(rnd32));
767 if (res) {
768 DMSG("Random read failed: %#"PRIx32, res);
769 return min * SMALL_PAGE_SIZE;
770 }
771 rnd = rnd32 % (max - min);
772 }
773
774 return (min + rnd) * SMALL_PAGE_SIZE;
775 #else /*!CFG_TA_ASLR*/
776 return 0;
777 #endif /*!CFG_TA_ASLR*/
778 }
779
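/*
 * Map and populate every load segment of a non-legacy ELF. Writeable
 * segments are mapped as zero-initialized memory and filled from the TA
 * binary, read-only segments are mapped directly from the binary (and
 * must have filesz == memsz). Parts overlapping the already mapped first
 * page of a shared library are skipped or trimmed.
 */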
780 static void populate_segments(struct ta_elf *elf)
781 {
782 TEE_Result res = TEE_SUCCESS;
783 struct segment *seg = NULL;
784 vaddr_t va = 0;
785 size_t pad_begin = 0;
786
787 assert(!elf->is_legacy);
788 TAILQ_FOREACH(seg, &elf->segs, link) {
789 struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
790 size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
791 seg->vaddr - seg->memsz);
792
793 if (seg->remapped_writeable) {
794 size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
795 rounddown(seg->vaddr);
796
797 assert(elf->load_addr);
798 va = rounddown(elf->load_addr + seg->vaddr);
799 assert(va >= elf->max_addr);
800 res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
801 if (res)
802 err(res, "sys_map_zi");
803
804 copy_remapped_to(elf, seg);
805 elf->max_addr = va + num_bytes;
806 } else {
807 uint32_t flags = 0;
808 size_t filesz = seg->filesz;
809 size_t memsz = seg->memsz;
810 size_t offset = seg->offset;
811 size_t vaddr = seg->vaddr;
812
813 if (offset < elf->max_offs) {
814 /*
815 * We're in a load segment which overlaps
816 * with (or is covered by) the first page
817 * of a shared library.
818 */
819 if (vaddr + filesz < SMALL_PAGE_SIZE) {
820 size_t num_bytes = 0;
821
822 /*
823 * If this segment is completely
824 * covered, take next.
825 */
826 if (vaddr + memsz <= SMALL_PAGE_SIZE)
827 continue;
828
829 /*
830 * All data of the segment is
831 * loaded, but we need to zero
832 * extend it.
833 */
834 va = elf->max_addr;
835 num_bytes = roundup(vaddr + memsz) -
836 roundup(vaddr) -
837 SMALL_PAGE_SIZE;
838 assert(num_bytes);
839 res = sys_map_zi(num_bytes, 0, &va, 0,
840 0);
841 if (res)
842 err(res, "sys_map_zi");
843 elf->max_addr = roundup(va + num_bytes);
844 continue;
845 }
846
847 /* Partial overlap, remove the first page. */
848 vaddr += SMALL_PAGE_SIZE;
849 filesz -= SMALL_PAGE_SIZE;
850 memsz -= SMALL_PAGE_SIZE;
851 offset += SMALL_PAGE_SIZE;
852 }
853
854 if (!elf->load_addr) {
855 va = 0;
856 pad_begin = get_pad_begin();
857 /*
858 * If mapping with pad_begin fails we'll
859 * retry without pad_begin, effectively
860 * disabling ASLR for the current ELF file.
861 */
862 } else {
863 va = vaddr + elf->load_addr;
864 pad_begin = 0;
865 }
866
867 if (seg->flags & PF_W)
868 flags |= LDELF_MAP_FLAG_WRITEABLE;
869 else
870 flags |= LDELF_MAP_FLAG_SHAREABLE;
871 if (seg->flags & PF_X)
872 flags |= LDELF_MAP_FLAG_EXECUTABLE;
873 if (!(seg->flags & PF_R))
874 err(TEE_ERROR_NOT_SUPPORTED,
875 "Segment must be readable");
876 if (flags & LDELF_MAP_FLAG_WRITEABLE) {
877 res = sys_map_zi(memsz, 0, &va, pad_begin,
878 pad_end);
879 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
880 res = sys_map_zi(memsz, 0, &va, 0,
881 pad_end);
882 if (res)
883 err(res, "sys_map_zi");
884 res = sys_copy_from_ta_bin((void *)va, filesz,
885 elf->handle, offset);
886 if (res)
887 err(res, "sys_copy_from_ta_bin");
888 } else {
889 if (filesz != memsz)
890 err(TEE_ERROR_BAD_FORMAT,
891 "Filesz and memsz mismatch");
892 res = sys_map_ta_bin(&va, filesz, flags,
893 elf->handle, offset,
894 pad_begin, pad_end);
895 if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
896 res = sys_map_ta_bin(&va, filesz, flags,
897 elf->handle,
898 offset, 0,
899 pad_end);
900 if (res)
901 err(res, "sys_map_ta_bin");
902 }
903
904 if (!elf->load_addr)
905 elf->load_addr = va;
906 elf->max_addr = roundup(va + memsz);
907 elf->max_offs += filesz;
908 }
909 }
910 }
911
912 static void ta_elf_add_bti(struct ta_elf *elf)
913 {
914 TEE_Result res = TEE_SUCCESS;
915 struct segment *seg = NULL;
916 uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI;
917
918 TAILQ_FOREACH(seg, &elf->segs, link) {
919 vaddr_t va = elf->load_addr + seg->vaddr;
920
921 if (seg->flags & PF_X) {
922 res = sys_set_prot(va, seg->memsz, flags);
923 if (res)
924 err(res, "sys_set_prot");
925 }
926 }
927 }
928
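/*
 * Parse the PT_GNU_PROPERTY note, if present, looking for the
 * GNU_PROPERTY_AARCH64_FEATURE_1_BTI flag, and set elf->bti_enabled
 * accordingly. Only relevant when CFG_TA_BTI is enabled.
 */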
929 static void parse_property_segment(struct ta_elf *elf)
930 {
931 char *desc = NULL;
932 size_t align = elf->prop_align;
933 size_t desc_offset = 0;
934 size_t prop_offset = 0;
935 vaddr_t va = 0;
936 Elf_Note *note = NULL;
937 char *name = NULL;
938
939 if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
940 return;
941
942 check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
943 elf->prop_memsz);
944
945 va = elf->load_addr + elf->prop_start;
946 note = (void *)va;
947 name = (char *)(note + 1);
948
949 if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
950 return;
951
952 if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
953 note->n_namesz != sizeof(ELF_NOTE_GNU) ||
954 memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
955 !IS_POWER_OF_TWO(align))
956 return;
957
958 desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);
959
960 if (desc_offset > elf->prop_memsz ||
961 ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz)
962 return;
963
964 desc = (char *)(va + desc_offset);
965
966 do {
967 Elf_Prop *prop = (void *)(desc + prop_offset);
968 size_t data_offset = prop_offset + sizeof(*prop);
969
970 if (note->n_descsz < data_offset)
971 return;
972
973 data_offset = confine_array_index(data_offset, note->n_descsz);
974
975 if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
976 uint32_t *pr_data = (void *)(desc + data_offset);
977
978 if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
979 prop->pr_datasz != sizeof(*pr_data))
980 return;
981
982 if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
983 DMSG("BTI Feature present in note property");
984 elf->bti_enabled = true;
985 }
986 }
987
988 prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align);
989 } while (prop_offset < note->n_descsz);
990 }
991
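/*
 * Build the segment list from the program headers and reserve virtual
 * memory for the whole image. If the first load segment starts within
 * the first page of the file (a shared library with the ELF header in a
 * load segment), the page mapped by init_elf() is remapped to a region
 * large enough to hold the entire image.
 */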
992 static void map_segments(struct ta_elf *elf)
993 {
994 TEE_Result res = TEE_SUCCESS;
995
996 parse_load_segments(elf);
997 adjust_segments(elf);
998 if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
999 vaddr_t va = 0;
1000 size_t sz = elf->max_addr - elf->load_addr;
1001 struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
1002 size_t pad_begin = get_pad_begin();
1003
1004 /*
1005 * We're loading a library; if that ever stops being the case,
1006 * other parts of the code need to be updated too.
1007 */
1008 assert(!elf->is_main);
1009
1010 /*
1011 * Now that we know how much virtual memory is needed move
1012 * the already mapped part to a location which can
1013 * accommodate us.
1014 */
1015 res = sys_remap(elf->load_addr, &va, sz, pad_begin,
1016 roundup(seg->vaddr + seg->memsz));
1017 if (res == TEE_ERROR_OUT_OF_MEMORY)
1018 res = sys_remap(elf->load_addr, &va, sz, 0,
1019 roundup(seg->vaddr + seg->memsz));
1020 if (res)
1021 err(res, "sys_remap");
1022 elf->ehdr_addr = va;
1023 elf->load_addr = va;
1024 elf->max_addr = va + sz;
1025 elf->phdr = (void *)(va + elf->e_phoff);
1026 }
1027 }
1028
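/*
 * If @type is PT_DYNAMIC, queue every DT_NEEDED dependency for loading.
 * The dependency names found in .dynstr are TA UUID strings.
 */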
1029 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
1030 vaddr_t addr, size_t memsz)
1031 {
1032 size_t dyn_entsize = 0;
1033 size_t num_dyns = 0;
1034 size_t n = 0;
1035 unsigned int tag = 0;
1036 size_t val = 0;
1037 TEE_UUID uuid = { };
1038 char *str_tab = NULL;
1039 size_t str_tab_sz = 0;
1040
1041 if (type != PT_DYNAMIC)
1042 return;
1043
1044 check_phdr_in_range(elf, type, addr, memsz);
1045
1046 if (elf->is_32bit)
1047 dyn_entsize = sizeof(Elf32_Dyn);
1048 else
1049 dyn_entsize = sizeof(Elf64_Dyn);
1050
1051 assert(!(memsz % dyn_entsize));
1052 num_dyns = memsz / dyn_entsize;
1053
1054 for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
1055 read_dyn(elf, addr, n, &tag, &val);
1056 if (tag == DT_STRTAB)
1057 str_tab = (char *)(val + elf->load_addr);
1058 else if (tag == DT_STRSZ)
1059 str_tab_sz = val;
1060 }
1061 check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);
1062
1063 for (n = 0; n < num_dyns; n++) {
1064 read_dyn(elf, addr, n, &tag, &val);
1065 if (tag != DT_NEEDED)
1066 continue;
1067 if (val >= str_tab_sz)
1068 err(TEE_ERROR_BAD_FORMAT,
1069 "Offset into .dynstr/STRTAB out of range");
1070 tee_uuid_from_str(&uuid, str_tab + val);
1071 queue_elf(&uuid);
1072 }
1073 }
1074
1075 static void add_dependencies(struct ta_elf *elf)
1076 {
1077 size_t n = 0;
1078
1079 if (elf->is_32bit) {
1080 Elf32_Phdr *phdr = elf->phdr;
1081
1082 for (n = 0; n < elf->e_phnum; n++)
1083 add_deps_from_segment(elf, phdr[n].p_type,
1084 phdr[n].p_vaddr, phdr[n].p_memsz);
1085 } else {
1086 Elf64_Phdr *phdr = elf->phdr;
1087
1088 for (n = 0; n < elf->e_phnum; n++)
1089 add_deps_from_segment(elf, phdr[n].p_type,
1090 phdr[n].p_vaddr, phdr[n].p_memsz);
1091 }
1092 }
1093
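/*
 * Allocate a private copy of the section headers, taking them from the
 * already mapped first page when possible and reading the rest from the
 * TA binary.
 */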
1094 static void copy_section_headers(struct ta_elf *elf)
1095 {
1096 TEE_Result res = TEE_SUCCESS;
1097 size_t sz = 0;
1098 size_t offs = 0;
1099
1100 if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
1101 err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");
1102
1103 elf->shdr = malloc(sz);
1104 if (!elf->shdr)
1105 err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
1106
1107 /*
1108 * We're assuming that the section headers come after the load segments,
1109 * but if it's a very small dynamically linked library the section
1110 * headers can still end up (partially?) in the first mapped page.
1111 */
1112 if (elf->e_shoff < SMALL_PAGE_SIZE) {
1113 assert(!elf->is_main);
1114 offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
1115 memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
1116 offs);
1117 }
1118
1119 if (offs < sz) {
1120 res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
1121 sz - offs, elf->handle,
1122 elf->e_shoff + offs);
1123 if (res)
1124 err(res, "sys_copy_from_ta_bin");
1125 }
1126 }
1127
1128 static void close_handle(struct ta_elf *elf)
1129 {
1130 TEE_Result res = sys_close_ta_bin(elf->handle);
1131
1132 if (res)
1133 err(res, "sys_close_ta_bin");
1134 elf->handle = -1;
1135 }
1136
1137 static void clean_elf_load_main(struct ta_elf *elf)
1138 {
1139 TEE_Result res = TEE_SUCCESS;
1140
1141 /*
1142 * Clean up from last attempt to load
1143 */
1144 res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
1145 if (res)
1146 err(res, "sys_unmap");
1147
1148 while (!TAILQ_EMPTY(&elf->segs)) {
1149 struct segment *seg = TAILQ_FIRST(&elf->segs);
1150 vaddr_t va = 0;
1151 size_t num_bytes = 0;
1152
1153 va = rounddown(elf->load_addr + seg->vaddr);
1154 if (seg->remapped_writeable)
1155 num_bytes = roundup(seg->vaddr + seg->memsz) -
1156 rounddown(seg->vaddr);
1157 else
1158 num_bytes = seg->memsz;
1159
1160 res = sys_unmap(va, num_bytes);
1161 if (res)
1162 err(res, "sys_unmap");
1163
1164 TAILQ_REMOVE(&elf->segs, seg, link);
1165 free(seg);
1166 }
1167
1168 free(elf->shdr);
1169 memset(&elf->is_32bit, 0,
1170 (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
1171
1172 TAILQ_INIT(&elf->segs);
1173 }
1174
1175 #ifdef ARM64
1176 /*
1177 * Allocates an offset in the TA's Thread Control Block for the TLS segment of
1178 * the @elf module.
1179 */
1180 #define TCB_HEAD_SIZE (2 * sizeof(long))
1181 static void set_tls_offset(struct ta_elf *elf)
1182 {
1183 static size_t next_offs = TCB_HEAD_SIZE;
1184
1185 if (!elf->tls_start)
1186 return;
1187
1188 /* Module has a TLS segment */
1189 elf->tls_tcb_offs = next_offs;
1190 next_offs += elf->tls_memsz;
1191 }
1192 #else
1193 static void set_tls_offset(struct ta_elf *elf __unused) {}
1194 #endif
1195
1196 static void load_main(struct ta_elf *elf)
1197 {
1198 init_elf(elf);
1199 map_segments(elf);
1200 populate_segments(elf);
1201 add_dependencies(elf);
1202 copy_section_headers(elf);
1203 save_symtab(elf);
1204 close_handle(elf);
1205 set_tls_offset(elf);
1206 parse_property_segment(elf);
1207 if (elf->bti_enabled)
1208 ta_elf_add_bti(elf);
1209
1210 elf->head = (struct ta_head *)elf->load_addr;
1211 if (elf->head->depr_entry != UINT64_MAX) {
1212 /*
1213 * Legacy TAs set their entry point in ta_head. For
1214 * non-legacy TAs the ELF entry point is used instead,
1215 * leaving the ta_head entry point set to UINT64_MAX to
1216 * indicate that it's not used.
1217 *
1218 * NB, everything before the commit a73b5878c89d ("Replace
1219 * ta_head.entry with elf entry") is considered legacy TAs
1220 * for ldelf.
1221 *
1222 * Legacy TAs cannot be mapped with shared memory segments
1223 * so restart the mapping if it turned out we're loading a
1224 * legacy TA.
1225 */
1226
1227 DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
1228 clean_elf_load_main(elf);
1229 elf->is_legacy = true;
1230 init_elf(elf);
1231 map_segments(elf);
1232 populate_segments_legacy(elf);
1233 add_dependencies(elf);
1234 copy_section_headers(elf);
1235 save_symtab(elf);
1236 close_handle(elf);
1237 elf->head = (struct ta_head *)elf->load_addr;
1238 /*
1239 * Check that the TA is still a legacy TA; if it isn't, give
1240 * up now since we're likely under attack.
1241 */
1242 if (elf->head->depr_entry == UINT64_MAX)
1243 err(TEE_ERROR_GENERIC,
1244 "TA %pUl was changed on disk to non-legacy",
1245 (void *)&elf->uuid);
1246 }
1247
1248 }
1249
1250 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
1251 uint32_t *ta_flags)
1252 {
1253 struct ta_elf *elf = queue_elf(uuid);
1254 vaddr_t va = 0;
1255 TEE_Result res = TEE_SUCCESS;
1256
1257 assert(elf);
1258 elf->is_main = true;
1259
1260 load_main(elf);
1261
1262 *is_32bit = elf->is_32bit;
1263 res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
1264 if (res)
1265 err(res, "sys_map_zi stack");
1266
1267 if (elf->head->flags & ~TA_FLAGS_MASK)
1268 err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32,
1269 elf->head->flags & ~TA_FLAGS_MASK);
1270
1271 *ta_flags = elf->head->flags;
1272 *sp = va + elf->head->stack_size;
1273 ta_stack = va;
1274 ta_stack_size = elf->head->stack_size;
1275 }
1276
1277 void ta_elf_finalize_load_main(uint64_t *entry)
1278 {
1279 struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
1280 TEE_Result res = TEE_SUCCESS;
1281
1282 assert(elf->is_main);
1283
1284 res = ta_elf_set_init_fini_info_compat(elf->is_32bit);
1285 if (res)
1286 err(res, "ta_elf_set_init_fini_info_compat");
1287 res = ta_elf_set_elf_phdr_info(elf->is_32bit);
1288 if (res)
1289 err(res, "ta_elf_set_elf_phdr_info");
1290
1291 if (elf->is_legacy)
1292 *entry = elf->head->depr_entry;
1293 else
1294 *entry = elf->e_entry + elf->load_addr;
1295 }
1296
1297
1298 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
1299 {
1300 if (elf->is_main)
1301 return;
1302
1303 init_elf(elf);
1304 if (elf->is_32bit != is_32bit)
1305 err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
1306 (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1307 is_32bit ? "32" : "64");
1308
1309 map_segments(elf);
1310 populate_segments(elf);
1311 add_dependencies(elf);
1312 copy_section_headers(elf);
1313 save_symtab(elf);
1314 close_handle(elf);
1315 set_tls_offset(elf);
1316 parse_property_segment(elf);
1317 if (elf->bti_enabled)
1318 ta_elf_add_bti(elf);
1319 }
1320
1321 void ta_elf_finalize_mappings(struct ta_elf *elf)
1322 {
1323 TEE_Result res = TEE_SUCCESS;
1324 struct segment *seg = NULL;
1325
1326 if (!elf->is_legacy)
1327 return;
1328
1329 TAILQ_FOREACH(seg, &elf->segs, link) {
1330 vaddr_t va = elf->load_addr + seg->vaddr;
1331 uint32_t flags = 0;
1332
1333 if (seg->flags & PF_W)
1334 flags |= LDELF_MAP_FLAG_WRITEABLE;
1335 if (seg->flags & PF_X)
1336 flags |= LDELF_MAP_FLAG_EXECUTABLE;
1337
1338 res = sys_set_prot(va, seg->memsz, flags);
1339 if (res)
1340 err(res, "sys_set_prot");
1341 }
1342 }
1343
1344 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1345 const char *fmt, ...)
1346 {
1347 va_list ap;
1348
1349 va_start(ap, fmt);
1350 print_func(pctx, fmt, ap);
1351 va_end(ap);
1352 }
1353
1354 static void print_seg(void *pctx, print_func_t print_func,
1355 size_t idx __maybe_unused, int elf_idx __maybe_unused,
1356 vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1357 size_t sz __maybe_unused, uint32_t flags)
1358 {
1359 int rc __maybe_unused = 0;
1360 int width __maybe_unused = 8;
1361 char desc[14] __maybe_unused = "";
1362 char flags_str[] __maybe_unused = "----";
1363
1364 if (elf_idx > -1) {
1365 rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1366 assert(rc >= 0);
1367 } else {
1368 if (flags & DUMP_MAP_EPHEM) {
1369 rc = snprintf(desc, sizeof(desc), " (param)");
1370 assert(rc >= 0);
1371 }
1372 if (flags & DUMP_MAP_LDELF) {
1373 rc = snprintf(desc, sizeof(desc), " (ldelf)");
1374 assert(rc >= 0);
1375 }
1376 if (va == ta_stack) {
1377 rc = snprintf(desc, sizeof(desc), " (stack)");
1378 assert(rc >= 0);
1379 }
1380 }
1381
1382 if (flags & DUMP_MAP_READ)
1383 flags_str[0] = 'r';
1384 if (flags & DUMP_MAP_WRITE)
1385 flags_str[1] = 'w';
1386 if (flags & DUMP_MAP_EXEC)
1387 flags_str[2] = 'x';
1388 if (flags & DUMP_MAP_SECURE)
1389 flags_str[3] = 's';
1390
1391 print_wrapper(pctx, print_func,
1392 "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1393 idx, width, va, width, pa, sz, flags_str, desc);
1394 }
1395
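/*
 * Step the (@elf, @seg) iterator to the next segment, visiting all ELFs
 * and their segments in order of increasing load address. Returns false
 * when there is nothing left to visit.
 */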
1396 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1397 struct ta_elf **elf, struct segment **seg,
1398 size_t *elf_idx)
1399 {
1400 struct ta_elf *e = NULL;
1401 struct segment *s = NULL;
1402 size_t idx = 0;
1403 vaddr_t va = 0;
1404 struct ta_elf *e2 = NULL;
1405 size_t i2 = 0;
1406
1407 assert(elf && seg && elf_idx);
1408 e = *elf;
1409 s = *seg;
1410 assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1411
1412 if (s) {
1413 s = TAILQ_NEXT(s, link);
1414 if (s) {
1415 *seg = s;
1416 return true;
1417 }
1418 }
1419
1420 if (e)
1421 va = e->load_addr;
1422
1423 /* Find the ELF with next load address */
1424 e = NULL;
1425 TAILQ_FOREACH(e2, elf_queue, link) {
1426 if (e2->load_addr > va) {
1427 if (!e || e2->load_addr < e->load_addr) {
1428 e = e2;
1429 idx = i2;
1430 }
1431 }
1432 i2++;
1433 }
1434 if (!e)
1435 return false;
1436
1437 *elf = e;
1438 *seg = TAILQ_FIRST(&e->segs);
1439 *elf_idx = idx;
1440 return true;
1441 }
1442
1443 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1444 struct ta_elf_queue *elf_queue, size_t num_maps,
1445 struct dump_map *maps, vaddr_t mpool_base)
1446 {
1447 struct segment *seg = NULL;
1448 struct ta_elf *elf = NULL;
1449 size_t elf_idx = 0;
1450 size_t idx = 0;
1451 size_t map_idx = 0;
1452
1453 /*
1454 * Loop over all segments and maps, printing virtual addresses in
1455 * order. A segment has priority if the same virtual address is
1456 * present in both a map and a segment.
1457 */
1458 get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1459 while (true) {
1460 vaddr_t va = -1;
1461 size_t sz = 0;
1462 uint32_t flags = DUMP_MAP_SECURE;
1463 size_t offs = 0;
1464
1465 if (seg) {
1466 va = rounddown(seg->vaddr + elf->load_addr);
1467 sz = roundup(seg->vaddr + seg->memsz) -
1468 rounddown(seg->vaddr);
1469 }
1470
1471 while (map_idx < num_maps && maps[map_idx].va <= va) {
1472 uint32_t f = 0;
1473
1474 /* If there's a match, it should be the same map */
1475 if (maps[map_idx].va == va) {
1476 /*
1477 * In shared libraries the first page is
1478 * mapped separately with the rest of that
1479 * segment following back to back in a
1480 * separate entry.
1481 */
1482 if (map_idx + 1 < num_maps &&
1483 maps[map_idx].sz == SMALL_PAGE_SIZE) {
1484 vaddr_t next_va = maps[map_idx].va +
1485 maps[map_idx].sz;
1486 size_t comb_sz = maps[map_idx].sz +
1487 maps[map_idx + 1].sz;
1488
1489 if (next_va == maps[map_idx + 1].va &&
1490 comb_sz == sz &&
1491 maps[map_idx].flags ==
1492 maps[map_idx + 1].flags) {
1493 /* Skip this and next entry */
1494 map_idx += 2;
1495 continue;
1496 }
1497 }
1498 assert(maps[map_idx].sz == sz);
1499 } else if (maps[map_idx].va < va) {
1500 if (maps[map_idx].va == mpool_base)
1501 f |= DUMP_MAP_LDELF;
1502 print_seg(pctx, print_func, idx, -1,
1503 maps[map_idx].va, maps[map_idx].pa,
1504 maps[map_idx].sz,
1505 maps[map_idx].flags | f);
1506 idx++;
1507 }
1508 map_idx++;
1509 }
1510
1511 if (!seg)
1512 break;
1513
1514 offs = rounddown(seg->offset);
1515 if (seg->flags & PF_R)
1516 flags |= DUMP_MAP_READ;
1517 if (seg->flags & PF_W)
1518 flags |= DUMP_MAP_WRITE;
1519 if (seg->flags & PF_X)
1520 flags |= DUMP_MAP_EXEC;
1521
1522 print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1523 idx++;
1524
1525 if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1526 seg = NULL;
1527 }
1528
1529 elf_idx = 0;
1530 TAILQ_FOREACH(elf, elf_queue, link) {
1531 print_wrapper(pctx, print_func,
1532 " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1533 elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1534 elf_idx++;
1535 }
1536 }
1537
1538 #ifdef CFG_UNWIND
1539 /* Called by libunw */
1540 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end)
1541 {
1542 struct segment *seg = NULL;
1543 struct ta_elf *elf = NULL;
1544 vaddr_t a = 0;
1545
1546 TAILQ_FOREACH(elf, &main_elf_queue, link) {
1547 if (addr < elf->load_addr)
1548 continue;
1549 a = addr - elf->load_addr;
1550 TAILQ_FOREACH(seg, &elf->segs, link) {
1551 if (a < seg->vaddr)
1552 continue;
1553 if (a - seg->vaddr < seg->filesz) {
1554 *idx_start = elf->exidx_start + elf->load_addr;
1555 *idx_end = elf->exidx_start + elf->load_addr +
1556 elf->exidx_size;
1557 return true;
1558 }
1559 }
1560 }
1561
1562 return false;
1563 }
1564
1565 void ta_elf_stack_trace_a32(uint32_t regs[16])
1566 {
1567 struct unwind_state_arm32 state = { };
1568
1569 memcpy(state.registers, regs, sizeof(state.registers));
1570 print_stack_arm32(&state, ta_stack, ta_stack_size);
1571 }
1572
1573 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1574 {
1575 struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1576
1577 print_stack_arm64(&state, ta_stack, ta_stack_size);
1578 }
1579 #endif
1580
1581 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1582 {
1583 TEE_Result res = TEE_ERROR_GENERIC;
1584 struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1585 struct ta_elf *lib = ta_elf_find_elf(uuid);
1586 struct ta_elf *elf = NULL;
1587
1588 if (lib)
1589 return TEE_SUCCESS; /* Already mapped */
1590
1591 lib = queue_elf_helper(uuid);
1592 if (!lib)
1593 return TEE_ERROR_OUT_OF_MEMORY;
1594
1595 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1596 ta_elf_load_dependency(elf, ta->is_32bit);
1597
1598 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1599 ta_elf_relocate(elf);
1600 ta_elf_finalize_mappings(elf);
1601 }
1602
1603 for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1604 DMSG("ELF (%pUl) at %#"PRIxVA,
1605 (void *)&elf->uuid, elf->load_addr);
1606
1607 res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
1608 if (res)
1609 return res;
1610
1611 return ta_elf_set_elf_phdr_info(ta->is_32bit);
1612 }
1613
1614 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1615 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1616 vaddr_t addr, size_t memsz, vaddr_t *init,
1617 size_t *init_cnt, vaddr_t *fini,
1618 size_t *fini_cnt)
1619 {
1620 size_t addrsz = 0;
1621 size_t dyn_entsize = 0;
1622 size_t num_dyns = 0;
1623 size_t n = 0;
1624 unsigned int tag = 0;
1625 size_t val = 0;
1626
1627 assert(type == PT_DYNAMIC);
1628
1629 check_phdr_in_range(elf, type, addr, memsz);
1630
1631 if (elf->is_32bit) {
1632 dyn_entsize = sizeof(Elf32_Dyn);
1633 addrsz = 4;
1634 } else {
1635 dyn_entsize = sizeof(Elf64_Dyn);
1636 addrsz = 8;
1637 }
1638
1639 assert(!(memsz % dyn_entsize));
1640 num_dyns = memsz / dyn_entsize;
1641
1642 for (n = 0; n < num_dyns; n++) {
1643 read_dyn(elf, addr, n, &tag, &val);
1644 if (tag == DT_INIT_ARRAY)
1645 *init = val + elf->load_addr;
1646 else if (tag == DT_FINI_ARRAY)
1647 *fini = val + elf->load_addr;
1648 else if (tag == DT_INIT_ARRAYSZ)
1649 *init_cnt = val / addrsz;
1650 else if (tag == DT_FINI_ARRAYSZ)
1651 *fini_cnt = val / addrsz;
1652 }
1653 }
1654
1655 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1656 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1657 size_t *init_cnt, vaddr_t *fini,
1658 size_t *fini_cnt)
1659 {
1660 size_t n = 0;
1661
1662 if (elf->is_32bit) {
1663 Elf32_Phdr *phdr = elf->phdr;
1664
1665 for (n = 0; n < elf->e_phnum; n++) {
1666 if (phdr[n].p_type == PT_DYNAMIC) {
1667 get_init_fini_array(elf, phdr[n].p_type,
1668 phdr[n].p_vaddr,
1669 phdr[n].p_memsz,
1670 init, init_cnt, fini,
1671 fini_cnt);
1672 return;
1673 }
1674 }
1675 } else {
1676 Elf64_Phdr *phdr = elf->phdr;
1677
1678 for (n = 0; n < elf->e_phnum; n++) {
1679 if (phdr[n].p_type == PT_DYNAMIC) {
1680 get_init_fini_array(elf, phdr[n].p_type,
1681 phdr[n].p_vaddr,
1682 phdr[n].p_memsz,
1683 init, init_cnt, fini,
1684 fini_cnt);
1685 return;
1686 }
1687 }
1688 }
1689 }
1690
1691 /*
1692 * Deprecated by __elf_phdr_info below. Kept for compatibility.
1693 *
1694 * Pointers to ELF initialization and finalization functions are extracted by
1695 * ldelf and stored on the TA heap, then exported to the TA via the global
1696 * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism.
1697 */
1698
1699 struct __init_fini {
1700 uint32_t flags;
1701 uint16_t init_size;
1702 uint16_t fini_size;
1703
1704 void (**init)(void); /* @init_size entries */
1705 void (**fini)(void); /* @fini_size entries */
1706 };
1707
1708 #define __IFS_VALID BIT(0)
1709 #define __IFS_INIT_HAS_RUN BIT(1)
1710 #define __IFS_FINI_HAS_RUN BIT(2)
1711
1712 struct __init_fini_info {
1713 uint32_t reserved;
1714 uint16_t size;
1715 uint16_t pad;
1716 struct __init_fini *ifs; /* @size entries */
1717 };
1718
1719 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */
1720
1721 struct __init_fini32 {
1722 uint32_t flags;
1723 uint16_t init_size;
1724 uint16_t fini_size;
1725 uint32_t init;
1726 uint32_t fini;
1727 };
1728
1729 struct __init_fini_info32 {
1730 uint32_t reserved;
1731 uint16_t size;
1732 uint16_t pad;
1733 uint32_t ifs;
1734 };
1735
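/*
 * Resize the __init_fini array referenced by the __init_fini_info
 * structure at @va to @cnt entries, zero-initializing any new entries.
 * Handles both the 32-bit and the native layout.
 */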
1736 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1737 {
1738 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1739 struct __init_fini_info *info = (struct __init_fini_info *)va;
1740 struct __init_fini32 *ifs32 = NULL;
1741 struct __init_fini *ifs = NULL;
1742 size_t prev_cnt = 0;
1743 void *ptr = NULL;
1744
1745 if (is_32bit) {
1746 ptr = (void *)(vaddr_t)info32->ifs;
1747 ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1748 if (!ptr)
1749 return TEE_ERROR_OUT_OF_MEMORY;
1750 ifs32 = ptr;
1751 prev_cnt = info32->size;
1752 if (cnt > prev_cnt)
1753 memset(ifs32 + prev_cnt, 0,
1754 (cnt - prev_cnt) * sizeof(*ifs32));
1755 info32->ifs = (uint32_t)(vaddr_t)ifs32;
1756 info32->size = cnt;
1757 } else {
1758 ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1759 if (!ptr)
1760 return TEE_ERROR_OUT_OF_MEMORY;
1761 ifs = ptr;
1762 prev_cnt = info->size;
1763 if (cnt > prev_cnt)
1764 memset(ifs + prev_cnt, 0,
1765 (cnt - prev_cnt) * sizeof(*ifs));
1766 info->ifs = ifs;
1767 info->size = cnt;
1768 }
1769
1770 return TEE_SUCCESS;
1771 }
1772
1773 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1774 {
1775 struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1776 struct __init_fini_info *info = (struct __init_fini_info *)va;
1777 struct __init_fini32 *ifs32 = NULL;
1778 struct __init_fini *ifs = NULL;
1779 size_t init_cnt = 0;
1780 size_t fini_cnt = 0;
1781 vaddr_t init = 0;
1782 vaddr_t fini = 0;
1783
1784 if (is_32bit) {
1785 assert(idx < info32->size);
1786 ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1787
1788 if (ifs32->flags & __IFS_VALID)
1789 return;
1790
1791 elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1792 &fini_cnt);
1793
1794 ifs32->init = (uint32_t)init;
1795 ifs32->init_size = init_cnt;
1796
1797 ifs32->fini = (uint32_t)fini;
1798 ifs32->fini_size = fini_cnt;
1799
1800 ifs32->flags |= __IFS_VALID;
1801 } else {
1802 assert(idx < info->size);
1803 ifs = &info->ifs[idx];
1804
1805 if (ifs->flags & __IFS_VALID)
1806 return;
1807
1808 elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1809 &fini_cnt);
1810
1811 ifs->init = (void (**)(void))init;
1812 ifs->init_size = init_cnt;
1813
1814 ifs->fini = (void (**)(void))fini;
1815 ifs->fini_size = fini_cnt;
1816
1817 ifs->flags |= __IFS_VALID;
1818 }
1819 }
1820
1821 /*
1822 * Set or update __init_fini_info in the TA with information from the ELF
1823 * queue
1824 */
1825 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
1826 {
1827 struct __init_fini_info *info = NULL;
1828 TEE_Result res = TEE_SUCCESS;
1829 struct ta_elf *elf = NULL;
1830 vaddr_t info_va = 0;
1831 size_t cnt = 0;
1832
1833 res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
1834 if (res) {
1835 if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1836 /*
1837 * Not an error, only TAs linked against libutee from
1838 * OP-TEE 3.9.0 have this symbol.
1839 */
1840 return TEE_SUCCESS;
1841 }
1842 return res;
1843 }
1844 assert(info_va);
1845
1846 info = (struct __init_fini_info *)info_va;
1847 if (info->reserved)
1848 return TEE_ERROR_NOT_SUPPORTED;
1849
1850 TAILQ_FOREACH(elf, &main_elf_queue, link)
1851 cnt++;
1852
1853 /* Queue has at least one file (main) */
1854 assert(cnt);
1855
1856 res = realloc_ifs(info_va, cnt, is_32bit);
1857 if (res)
1858 goto err;
1859
1860 cnt = 0;
1861 TAILQ_FOREACH(elf, &main_elf_queue, link) {
1862 fill_ifs(info_va, cnt, elf, is_32bit);
1863 cnt++;
1864 }
1865
1866 return TEE_SUCCESS;
1867 err:
1868 free(info);
1869 return res;
1870 }
1871
1872 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
1873 {
1874 struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1875 struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1876 struct dl_phdr_info32 *dlpi32 = NULL;
1877 struct dl_phdr_info *dlpi = NULL;
1878 size_t prev_cnt = 0;
1879 void *ptr = NULL;
1880
1881 if (is_32bit) {
1882 ptr = (void *)(vaddr_t)info32->dlpi;
1883 ptr = realloc(ptr, cnt * sizeof(*dlpi32));
1884 if (!ptr)
1885 return TEE_ERROR_OUT_OF_MEMORY;
1886 dlpi32 = ptr;
1887 prev_cnt = info32->count;
1888 if (cnt > prev_cnt)
1889 memset(dlpi32 + prev_cnt, 0,
1890 (cnt - prev_cnt) * sizeof(*dlpi32));
1891 info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
1892 info32->count = cnt;
1893 } else {
1894 ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
1895 if (!ptr)
1896 return TEE_ERROR_OUT_OF_MEMORY;
1897 dlpi = ptr;
1898 prev_cnt = info->count;
1899 if (cnt > prev_cnt)
1900 memset(dlpi + prev_cnt, 0,
1901 (cnt - prev_cnt) * sizeof(*dlpi));
1902 info->dlpi = dlpi;
1903 info->count = cnt;
1904 }
1905
1906 return TEE_SUCCESS;
1907 }
1908
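/*
 * Fill entry @idx of the dl_phdr_info array in the TA's __elf_phdr_info
 * structure with the load address, SONAME, program headers and TLS
 * details of @elf, using the 32-bit layout when @is_32bit is set.
 */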
1909 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
1910 bool is_32bit)
1911 {
1912 struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1913 struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1914 struct dl_phdr_info32 *dlpi32 = NULL;
1915 struct dl_phdr_info *dlpi = NULL;
1916
1917 if (is_32bit) {
1918 assert(idx < info32->count);
1919 dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;
1920
1921 dlpi32->dlpi_addr = elf->load_addr;
1922 if (elf->soname)
1923 dlpi32->dlpi_name = (vaddr_t)elf->soname;
1924 else
1925 dlpi32->dlpi_name = (vaddr_t)&info32->zero;
1926 dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
1927 dlpi32->dlpi_phnum = elf->e_phnum;
1928 dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
1929 dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
1930 dlpi32->dlpi_tls_modid = elf->tls_mod_id;
1931 dlpi32->dlpi_tls_data = elf->tls_start;
1932 } else {
1933 assert(idx < info->count);
1934 dlpi = info->dlpi + idx;
1935
1936 dlpi->dlpi_addr = elf->load_addr;
1937 if (elf->soname)
1938 dlpi->dlpi_name = elf->soname;
1939 else
1940 dlpi->dlpi_name = &info32->zero;
1941 dlpi->dlpi_phdr = elf->phdr;
1942 dlpi->dlpi_phnum = elf->e_phnum;
1943 dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
1944 dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
1945 dlpi->dlpi_tls_modid = elf->tls_mod_id;
1946 dlpi->dlpi_tls_data = (void *)elf->tls_start;
1947 }
1948 }
1949
1950 /* Set or update __elf_hdr_info in the TA with information from the ELF queue */
1951 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
1952 {
1953 struct __elf_phdr_info *info = NULL;
1954 TEE_Result res = TEE_SUCCESS;
1955 struct ta_elf *elf = NULL;
1956 vaddr_t info_va = 0;
1957 size_t cnt = 0;
1958
1959 res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
1960 if (res) {
1961 if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1962 /* Older TA */
1963 return TEE_SUCCESS;
1964 }
1965 return res;
1966 }
1967 assert(info_va);
1968
1969 info = (struct __elf_phdr_info *)info_va;
1970 if (info->reserved)
1971 return TEE_ERROR_NOT_SUPPORTED;
1972
1973 TAILQ_FOREACH(elf, &main_elf_queue, link)
1974 cnt++;
1975
1976 res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
1977 if (res)
1978 return res;
1979
1980 cnt = 0;
1981 TAILQ_FOREACH(elf, &main_elf_queue, link) {
1982 fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
1983 cnt++;
1984 }
1985
1986 return TEE_SUCCESS;
1987 }
1988