1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  * Copyright (c) 2020-2023, Arm Limited
5  */
6 
7 #include <assert.h>
8 #include <config.h>
9 #include <confine_array_index.h>
10 #include <elf32.h>
11 #include <elf64.h>
12 #include <elf_common.h>
13 #include <ldelf.h>
14 #include <link.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string_ext.h>
18 #include <string.h>
19 #include <tee_api_types.h>
20 #include <tee_internal_api_extensions.h>
21 #include <unw/unwind.h>
22 #include <user_ta_header.h>
23 #include <util.h>
24 
25 #include "sys.h"
26 #include "ta_elf.h"
27 
28 /*
29  * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit
30  * TA
31  */
32 struct dl_phdr_info32 {
33 	uint32_t dlpi_addr;
34 	uint32_t dlpi_name;
35 	uint32_t dlpi_phdr;
36 	uint16_t dlpi_phnum;
37 	uint64_t dlpi_adds;
38 	uint64_t dlpi_subs;
39 	uint32_t dlpi_tls_modid;
40 	uint32_t dlpi_tls_data;
41 };
42 
43 static vaddr_t ta_stack;
44 static vaddr_t ta_stack_size;
45 
46 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
47 
48 /*
49  * Main application is always ID 1, shared libraries with TLS take IDs 2 and
50  * above
51  */
52 static void assign_tls_mod_id(struct ta_elf *elf)
53 {
54 	static size_t last_tls_mod_id = 1;
55 
56 	if (elf->is_main)
57 		assert(last_tls_mod_id == 1); /* Main always comes first */
58 	elf->tls_mod_id = last_tls_mod_id++;
59 }
60 
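/* Allocate a new ta_elf for @uuid and append it to the global ELF queue */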
61 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
62 {
63 	struct ta_elf *elf = calloc(1, sizeof(*elf));
64 
65 	if (!elf)
66 		return NULL;
67 
68 	TAILQ_INIT(&elf->segs);
69 
70 	elf->uuid = *uuid;
71 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
72 	return elf;
73 }
74 
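/*
 * Queue the ELF identified by @uuid for loading. Returns NULL if it is
 * already queued, otherwise the new entry. Panics if allocation fails.
 */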
75 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
76 {
77 	struct ta_elf *elf = ta_elf_find_elf(uuid);
78 
79 	if (elf)
80 		return NULL;
81 
82 	elf = queue_elf_helper(uuid);
83 	if (!elf)
84 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
85 
86 	return elf;
87 }
88 
89 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
90 {
91 	struct ta_elf *elf = NULL;
92 
93 	TAILQ_FOREACH(elf, &main_elf_queue, link)
94 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
95 			return elf;
96 
97 	return NULL;
98 }
99 
100 #if defined(ARM32) || defined(ARM64)
101 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
102 {
103 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
104 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
105 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
106 	    (ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE &&
107 	     ehdr->e_ident[EI_OSABI] != ELFOSABI_ARM) ||
108 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
109 #ifndef CFG_WITH_VFP
110 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
111 #endif
112 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
113 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
114 		return TEE_ERROR_BAD_FORMAT;
115 
116 	if (ehdr->e_ident[EI_OSABI] == ELFOSABI_NONE &&
117 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_V5)
118 		return TEE_ERROR_BAD_FORMAT;
119 
120 	if (ehdr->e_ident[EI_OSABI] == ELFOSABI_ARM &&
121 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_UNKNOWN)
122 		return TEE_ERROR_BAD_FORMAT;
123 
124 	elf->is_32bit = true;
125 	elf->e_entry = ehdr->e_entry;
126 	elf->e_phoff = ehdr->e_phoff;
127 	elf->e_shoff = ehdr->e_shoff;
128 	elf->e_phnum = ehdr->e_phnum;
129 	elf->e_shnum = ehdr->e_shnum;
130 	elf->e_phentsize = ehdr->e_phentsize;
131 	elf->e_shentsize = ehdr->e_shentsize;
132 
133 	return TEE_SUCCESS;
134 }
135 
136 #ifdef ARM64
137 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
138 {
139 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
140 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
141 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
142 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
143 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
144 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
145 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
146 		return TEE_ERROR_BAD_FORMAT;
147 
148 
149 	elf->is_32bit = false;
150 	elf->e_entry = ehdr->e_entry;
151 	elf->e_phoff = ehdr->e_phoff;
152 	elf->e_shoff = ehdr->e_shoff;
153 	elf->e_phnum = ehdr->e_phnum;
154 	elf->e_shnum = ehdr->e_shnum;
155 	elf->e_phentsize = ehdr->e_phentsize;
156 	elf->e_shentsize = ehdr->e_shentsize;
157 
158 	return TEE_SUCCESS;
159 }
160 #else /*ARM64*/
161 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
162 				 Elf64_Ehdr *ehdr __unused)
163 {
164 	return TEE_ERROR_NOT_SUPPORTED;
165 }
166 #endif /*ARM64*/
167 #endif /* ARM32 || ARM64 */
168 
169 #if defined(RV64)
170 static TEE_Result e32_parse_ehdr(struct ta_elf *elf __unused,
171 				 Elf32_Ehdr *ehdr __unused)
172 {
173 	return TEE_ERROR_BAD_FORMAT;
174 }
175 
176 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
177 {
178 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
179 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
180 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
181 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
182 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_RISCV ||
183 	    ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
184 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
185 		return TEE_ERROR_BAD_FORMAT;
186 
187 	elf->is_32bit = false;
188 	elf->e_entry = ehdr->e_entry;
189 	elf->e_phoff = ehdr->e_phoff;
190 	elf->e_shoff = ehdr->e_shoff;
191 	elf->e_phnum = ehdr->e_phnum;
192 	elf->e_shnum = ehdr->e_shnum;
193 	elf->e_phentsize = ehdr->e_phentsize;
194 	elf->e_shentsize = ehdr->e_shentsize;
195 
196 	return TEE_SUCCESS;
197 }
198 #endif /* RV64 */
199 
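/*
 * Panic unless the program header range @addr..@addr+@memsz (relative to
 * the load address) fits within the mapped ELF.
 */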
200 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
201 				vaddr_t addr, size_t memsz)
202 {
203 	vaddr_t max_addr = 0;
204 
205 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
206 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
207 
208 	/*
209 	 * elf->load_addr and elf->max_addr are both using the
210 	 * final virtual addresses, while this program header is
211 	 * relative to 0.
212 	 */
213 	if (max_addr > elf->max_addr - elf->load_addr)
214 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
215 		    type);
216 }
217 
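/*
 * Read the tag and value of dynamic entry @idx in the PT_DYNAMIC segment
 * at @addr, for both 32-bit and 64-bit ELF layouts.
 */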
218 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
219 		     size_t idx, unsigned int *tag, size_t *val)
220 {
221 	if (elf->is_32bit) {
222 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
223 
224 		*tag = dyn[idx].d_tag;
225 		*val = dyn[idx].d_un.d_val;
226 	} else {
227 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
228 
229 		*tag = dyn[idx].d_tag;
230 		*val = dyn[idx].d_un.d_val;
231 	}
232 }
233 
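/* Panic unless @ptr..@ptr+@sz lies within the mapped ELF */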
234 static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
235 			size_t sz)
236 {
237 	size_t max_addr = 0;
238 
239 	if ((vaddr_t)ptr < elf->load_addr)
240 		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);
241 
242 	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
243 		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);
244 
245 	if (max_addr > elf->max_addr)
246 		err(TEE_ERROR_BAD_FORMAT,
247 		    "%s %p..%#zx out of range", name, ptr, max_addr);
248 }
249 
250 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
251 			  size_t num_chains)
252 {
253 	/*
254 	 * Start from 2 since the first two words are mandatory and hold
255 	 * num_buckets and num_chains. This function is called twice: first
256 	 * to check that there's indeed room for num_buckets and num_chains,
257 	 * and then to check that all of it fits.
258 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
259 	 */
260 	size_t num_words = 2;
261 	size_t sz = 0;
262 
263 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
264 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);
265 
266 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
267 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
268 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
269 		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");
270 
271 	check_range(elf, "DT_HASH", ptr, sz);
272 }
273 
274 static void check_gnu_hashtab(struct ta_elf *elf, void *ptr)
275 {
276 	struct gnu_hashtab *h = ptr;
277 	size_t num_words = 4; /* nbuckets, symoffset, bloom_size, bloom_shift */
278 	size_t bloom_words = 0;
279 	size_t sz = 0;
280 
281 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
282 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_GNU_HASH %p",
283 		    ptr);
284 
285 	if (elf->gnu_hashtab_size < sizeof(*h))
286 		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH too small");
287 
288 	/* Check validity of h->nbuckets and h->bloom_size */
289 
290 	if (elf->is_32bit)
291 		bloom_words = h->bloom_size;
292 	else
293 		bloom_words = h->bloom_size * 2;
294 	if (ADD_OVERFLOW(num_words, h->nbuckets, &num_words) ||
295 	    ADD_OVERFLOW(num_words, bloom_words, &num_words) ||
296 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz) ||
297 	    sz > elf->gnu_hashtab_size)
298 		err(TEE_ERROR_BAD_FORMAT, "DT_GNU_HASH overflow");
299 }
300 
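/*
 * Find the SHT_HASH and SHT_GNU_HASH sections, sanity check them and save
 * their addresses for later symbol lookups.
 */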
301 static void save_hashtab(struct ta_elf *elf)
302 {
303 	uint32_t *hashtab = NULL;
304 	size_t n = 0;
305 
306 	if (elf->is_32bit) {
307 		Elf32_Shdr *shdr = elf->shdr;
308 
309 		for (n = 0; n < elf->e_shnum; n++) {
310 			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
311 						       elf->load_addr);
312 
313 			if (shdr[n].sh_type == SHT_HASH) {
314 				elf->hashtab = addr;
315 			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
316 				elf->gnu_hashtab = addr;
317 				elf->gnu_hashtab_size = shdr[n].sh_size;
318 			}
319 		}
320 	} else {
321 		Elf64_Shdr *shdr = elf->shdr;
322 
323 		for (n = 0; n < elf->e_shnum; n++) {
324 			void *addr = (void *)(vaddr_t)(shdr[n].sh_addr +
325 						       elf->load_addr);
326 
327 			if (shdr[n].sh_type == SHT_HASH) {
328 				elf->hashtab = addr;
329 			} else if (shdr[n].sh_type == SHT_GNU_HASH) {
330 				elf->gnu_hashtab = addr;
331 				elf->gnu_hashtab_size = shdr[n].sh_size;
332 			}
333 		}
334 	}
335 
336 	if (elf->hashtab) {
337 		check_hashtab(elf, elf->hashtab, 0, 0);
338 		hashtab = elf->hashtab;
339 		check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
340 	}
341 	if (elf->gnu_hashtab)
342 		check_gnu_hashtab(elf, elf->gnu_hashtab);
343 }
344 
345 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
346 				     vaddr_t addr, size_t memsz)
347 {
348 	size_t dyn_entsize = 0;
349 	size_t num_dyns = 0;
350 	size_t n = 0;
351 	unsigned int tag = 0;
352 	size_t val = 0;
353 	char *str_tab = NULL;
354 
355 	if (type != PT_DYNAMIC)
356 		return;
357 
358 	if (elf->is_32bit)
359 		dyn_entsize = sizeof(Elf32_Dyn);
360 	else
361 		dyn_entsize = sizeof(Elf64_Dyn);
362 
363 	assert(!(memsz % dyn_entsize));
364 	num_dyns = memsz / dyn_entsize;
365 
366 	for (n = 0; n < num_dyns; n++) {
367 		read_dyn(elf, addr, n, &tag, &val);
368 		if (tag == DT_STRTAB) {
369 			str_tab = (char *)(val + elf->load_addr);
370 			break;
371 		}
372 	}
373 	for (n = 0; n < num_dyns; n++) {
374 		read_dyn(elf, addr, n, &tag, &val);
375 		if (tag == DT_SONAME) {
376 			elf->soname = str_tab + val;
377 			break;
378 		}
379 	}
380 }
381 
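/* Save the DT_SONAME string from the PT_DYNAMIC segment, if present */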
382 static void save_soname(struct ta_elf *elf)
383 {
384 	size_t n = 0;
385 
386 	if (elf->is_32bit) {
387 		Elf32_Phdr *phdr = elf->phdr;
388 
389 		for (n = 0; n < elf->e_phnum; n++)
390 			save_soname_from_segment(elf, phdr[n].p_type,
391 						 phdr[n].p_vaddr,
392 						 phdr[n].p_memsz);
393 	} else {
394 		Elf64_Phdr *phdr = elf->phdr;
395 
396 		for (n = 0; n < elf->e_phnum; n++)
397 			save_soname_from_segment(elf, phdr[n].p_type,
398 						 phdr[n].p_vaddr,
399 						 phdr[n].p_memsz);
400 	}
401 }
402 
403 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
404 {
405 	Elf32_Shdr *shdr = elf->shdr;
406 	size_t str_idx = shdr[tab_idx].sh_link;
407 
408 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
409 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
410 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
411 		    elf->dynsymtab);
412 	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);
413 
414 	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
415 		err(TEE_ERROR_BAD_FORMAT,
416 		    "Size of dynsymtab not an even multiple of Elf32_Sym");
417 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
418 
419 	if (str_idx >= elf->e_shnum)
420 		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
421 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
422 	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);
423 
424 	elf->dynstr_size = shdr[str_idx].sh_size;
425 }
426 
427 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
428 {
429 	Elf64_Shdr *shdr = elf->shdr;
430 	size_t str_idx = shdr[tab_idx].sh_link;
431 
432 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
433 					   elf->load_addr);
434 
435 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
436 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
437 		    elf->dynsymtab);
438 	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
439 		    shdr[tab_idx].sh_size);
440 
441 	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
442 		err(TEE_ERROR_BAD_FORMAT,
443 		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
444 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
445 
446 	if (str_idx >= elf->e_shnum)
447 		err(TEE_ERROR_BAD_FORMAT,
448 		    ".dynstr/STRTAB section index out of range");
449 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
450 	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);
451 
452 	elf->dynstr_size = shdr[str_idx].sh_size;
453 }
454 
455 static void save_symtab(struct ta_elf *elf)
456 {
457 	size_t n = 0;
458 
459 	if (elf->is_32bit) {
460 		Elf32_Shdr *shdr = elf->shdr;
461 
462 		for (n = 0; n < elf->e_shnum; n++) {
463 			if (shdr[n].sh_type == SHT_DYNSYM) {
464 				e32_save_symtab(elf, n);
465 				break;
466 			}
467 		}
468 	} else {
469 		Elf64_Shdr *shdr = elf->shdr;
470 
471 		for (n = 0; n < elf->e_shnum; n++) {
472 			if (shdr[n].sh_type == SHT_DYNSYM) {
473 				e64_save_symtab(elf, n);
474 				break;
475 			}
476 		}
477 
478 	}
479 
480 	save_hashtab(elf);
481 	save_soname(elf);
482 }
483 
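/*
 * Open the TA binary, map its first page, parse the ELF header and locate
 * the program headers.
 */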
484 static void init_elf(struct ta_elf *elf)
485 {
486 	TEE_Result res = TEE_SUCCESS;
487 	vaddr_t va = 0;
488 	uint32_t flags = LDELF_MAP_FLAG_SHAREABLE;
489 	size_t sz = 0;
490 
491 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
492 	if (res)
493 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
494 
495 	/*
496 	 * Map it read-only executable when we're loading a library where
497 	 * the ELF header is included in a load segment.
498 	 */
499 	if (!elf->is_main)
500 		flags |= LDELF_MAP_FLAG_EXECUTABLE;
501 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
502 	if (res)
503 		err(res, "sys_map_ta_bin");
504 	elf->ehdr_addr = va;
505 	if (!elf->is_main) {
506 		elf->load_addr = va;
507 		elf->max_addr = va + SMALL_PAGE_SIZE;
508 		elf->max_offs = SMALL_PAGE_SIZE;
509 	}
510 
511 	if (!IS_ELF(*(Elf32_Ehdr *)va))
512 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
513 
514 	res = e32_parse_ehdr(elf, (void *)va);
515 	if (res == TEE_ERROR_BAD_FORMAT)
516 		res = e64_parse_ehdr(elf, (void *)va);
517 	if (res)
518 		err(res, "Cannot parse ELF");
519 
520 	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
521 	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
522 		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");
523 
524 	if (sz > SMALL_PAGE_SIZE)
525 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
526 
527 	elf->phdr = (void *)(va + elf->e_phoff);
528 }
529 
530 static size_t roundup(size_t v)
531 {
532 	return ROUNDUP(v, SMALL_PAGE_SIZE);
533 }
534 
535 static size_t rounddown(size_t v)
536 {
537 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
538 }
539 
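/* Append a new load segment descriptor to the ELF's segment list */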
540 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
541 			size_t filesz, size_t memsz, size_t flags, size_t align)
542 {
543 	struct segment *seg = calloc(1, sizeof(*seg));
544 
545 	if (!seg)
546 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
547 
548 	if (memsz < filesz)
549 		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");
550 
551 	seg->offset = offset;
552 	seg->vaddr = vaddr;
553 	seg->filesz = filesz;
554 	seg->memsz = memsz;
555 	seg->flags = flags;
556 	seg->align = align;
557 
558 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
559 }
560 
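/*
 * Record all PT_LOAD segments and note PT_ARM_EXIDX, PT_TLS and
 * PT_GNU_PROPERTY information from the program headers.
 */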
561 static void parse_load_segments(struct ta_elf *elf)
562 {
563 	size_t n = 0;
564 
565 	if (elf->is_32bit) {
566 		Elf32_Phdr *phdr = elf->phdr;
567 
568 		for (n = 0; n < elf->e_phnum; n++)
569 			if (phdr[n].p_type == PT_LOAD) {
570 				add_segment(elf, phdr[n].p_offset,
571 					    phdr[n].p_vaddr, phdr[n].p_filesz,
572 					    phdr[n].p_memsz, phdr[n].p_flags,
573 					    phdr[n].p_align);
574 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
575 				elf->exidx_start = phdr[n].p_vaddr;
576 				elf->exidx_size = phdr[n].p_filesz;
577 			} else if (phdr[n].p_type == PT_TLS) {
578 				assign_tls_mod_id(elf);
579 			}
580 	} else {
581 		Elf64_Phdr *phdr = elf->phdr;
582 
583 		for (n = 0; n < elf->e_phnum; n++)
584 			if (phdr[n].p_type == PT_LOAD) {
585 				add_segment(elf, phdr[n].p_offset,
586 					    phdr[n].p_vaddr, phdr[n].p_filesz,
587 					    phdr[n].p_memsz, phdr[n].p_flags,
588 					    phdr[n].p_align);
589 			} else if (phdr[n].p_type == PT_TLS) {
590 				elf->tls_start = phdr[n].p_vaddr;
591 				elf->tls_filesz = phdr[n].p_filesz;
592 				elf->tls_memsz = phdr[n].p_memsz;
593 			} else if (IS_ENABLED(CFG_TA_BTI) &&
594 				   phdr[n].p_type == PT_GNU_PROPERTY) {
595 				elf->prop_start = phdr[n].p_vaddr;
596 				elf->prop_align = phdr[n].p_align;
597 				elf->prop_memsz = phdr[n].p_memsz;
598 			}
599 	}
600 }
601 
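/*
 * Fill a remapped (writeable) segment with its file content, reusing what
 * is already mapped where possible and reading the rest from the TA binary.
 */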
602 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
603 {
604 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
605 	size_t n = 0;
606 	size_t offs = seg->offset;
607 	size_t num_bytes = seg->filesz;
608 
609 	if (offs < elf->max_offs) {
610 		n = MIN(elf->max_offs - offs, num_bytes);
611 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
612 		dst += n;
613 		offs += n;
614 		num_bytes -= n;
615 	}
616 
617 	if (num_bytes) {
618 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
619 						      elf->handle, offs);
620 
621 		if (res)
622 			err(res, "sys_copy_from_ta_bin");
623 		elf->max_offs += offs;
624 	}
625 }
626 
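/*
 * Sanity check the segment list, merge segments sharing a page and align
 * addresses, offsets and sizes to page boundaries.
 */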
627 static void adjust_segments(struct ta_elf *elf)
628 {
629 	struct segment *seg = NULL;
630 	struct segment *prev_seg = NULL;
631 	size_t prev_end_addr = 0;
632 	size_t align = 0;
633 	size_t mask = 0;
634 
635 	/* Sanity check */
636 	TAILQ_FOREACH(seg, &elf->segs, link) {
637 		size_t dummy __maybe_unused = 0;
638 
639 		assert(seg->align >= SMALL_PAGE_SIZE);
640 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
641 		assert(seg->filesz <= seg->memsz);
642 		assert((seg->offset & SMALL_PAGE_MASK) ==
643 		       (seg->vaddr & SMALL_PAGE_MASK));
644 
645 		prev_seg = TAILQ_PREV(seg, segment_head, link);
646 		if (prev_seg) {
647 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
648 			assert(seg->offset >=
649 			       prev_seg->offset + prev_seg->filesz);
650 		}
651 		if (!align)
652 			align = seg->align;
653 		assert(align == seg->align);
654 	}
655 
656 	mask = align - 1;
657 
658 	seg = TAILQ_FIRST(&elf->segs);
659 	if (seg)
660 		seg = TAILQ_NEXT(seg, link);
661 	while (seg) {
662 		prev_seg = TAILQ_PREV(seg, segment_head, link);
663 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
664 
665 		/*
666 		 * This segment may overlap with the last "page" in the
667 		 * previous segment in two different ways:
668 		 * 1. Virtual address (and offset) overlaps =>
669 		 *    Permissions need to be merged. The offset must have
670 		 *    the same SMALL_PAGE_MASK bits as the vaddr so that it
671 		 *    lines up with the previous segment.
672 		 *
673 		 * 2. Only offset overlaps =>
674 		 *    The same page in the ELF is mapped at two different
675 		 *    virtual addresses. As a limitation this segment must
676 		 *    be mapped as writeable.
677 		 */
678 
679 		/* Case 1. */
680 		if (rounddown(seg->vaddr) < prev_end_addr) {
681 			assert((seg->vaddr & mask) == (seg->offset & mask));
682 			assert(prev_seg->memsz == prev_seg->filesz);
683 
684 			/*
685 			 * Merge the segments and their permissions.
686 			 * Note that there may be a small hole between the
687 			 * two sections.
688 			 */
689 			prev_seg->filesz = seg->vaddr + seg->filesz -
690 					   prev_seg->vaddr;
691 			prev_seg->memsz = seg->vaddr + seg->memsz -
692 					   prev_seg->vaddr;
693 			prev_seg->flags |= seg->flags;
694 
695 			TAILQ_REMOVE(&elf->segs, seg, link);
696 			free(seg);
697 			seg = TAILQ_NEXT(prev_seg, link);
698 			continue;
699 		}
700 
701 		/* Case 2. */
702 		if ((seg->offset & mask) &&
703 		    rounddown(seg->offset) <
704 		    (prev_seg->offset + prev_seg->filesz)) {
705 
706 			assert(seg->flags & PF_W);
707 			seg->remapped_writeable = true;
708 		}
709 
710 		/*
711 		 * No overlap, but we may need to align address, offset and
712 		 * size.
713 		 */
714 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
715 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
716 		seg->vaddr = rounddown(seg->vaddr);
717 		seg->offset = rounddown(seg->offset);
718 		seg = TAILQ_NEXT(seg, link);
719 	}
720 
721 }
722 
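/*
 * Map and populate the segments of a legacy TA: each segment is mapped as
 * zero-initialized memory and then filled from the TA binary.
 */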
723 static void populate_segments_legacy(struct ta_elf *elf)
724 {
725 	TEE_Result res = TEE_SUCCESS;
726 	struct segment *seg = NULL;
727 	vaddr_t va = 0;
728 
729 	assert(elf->is_legacy);
730 	TAILQ_FOREACH(seg, &elf->segs, link) {
731 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
732 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
733 					 seg->vaddr - seg->memsz);
734 		size_t num_bytes = roundup(seg->memsz);
735 
736 		if (!elf->load_addr)
737 			va = 0;
738 		else
739 			va = seg->vaddr + elf->load_addr;
740 
741 
742 		if (!(seg->flags & PF_R))
743 			err(TEE_ERROR_NOT_SUPPORTED,
744 			    "Segment must be readable");
745 
746 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
747 		if (res)
748 			err(res, "sys_map_zi");
749 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
750 					   elf->handle, seg->offset);
751 		if (res)
752 			err(res, "sys_copy_from_ta_bin");
753 
754 		if (!elf->load_addr)
755 			elf->load_addr = va;
756 		elf->max_addr = va + num_bytes;
757 		elf->max_offs = seg->offset + seg->filesz;
758 	}
759 }
760 
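/*
 * Return the padding (in bytes) to place before the first mapping,
 * randomized when CFG_TA_ASLR is enabled.
 */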
761 static size_t get_pad_begin(void)
762 {
763 #ifdef CFG_TA_ASLR
764 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
765 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
766 	TEE_Result res = TEE_SUCCESS;
767 	uint32_t rnd32 = 0;
768 	size_t rnd = 0;
769 
770 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
771 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
772 	if (max > min) {
773 		res = sys_gen_random_num(&rnd32, sizeof(rnd32));
774 		if (res) {
775 			DMSG("Random read failed: %#"PRIx32, res);
776 			return min * SMALL_PAGE_SIZE;
777 		}
778 		rnd = rnd32 % (max - min);
779 	}
780 
781 	return (min + rnd) * SMALL_PAGE_SIZE;
782 #else /*!CFG_TA_ASLR*/
783 	return 0;
784 #endif /*!CFG_TA_ASLR*/
785 }
786 
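/*
 * Map and populate all load segments: writeable segments are mapped as
 * zero-initialized memory and filled by copying from the TA binary, while
 * read-only segments are mapped directly from the binary.
 */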
787 static void populate_segments(struct ta_elf *elf)
788 {
789 	TEE_Result res = TEE_SUCCESS;
790 	struct segment *seg = NULL;
791 	vaddr_t va = 0;
792 	size_t pad_begin = 0;
793 
794 	assert(!elf->is_legacy);
795 	TAILQ_FOREACH(seg, &elf->segs, link) {
796 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
797 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
798 					 seg->vaddr - seg->memsz);
799 
800 		if (seg->remapped_writeable) {
801 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
802 					   rounddown(seg->vaddr);
803 
804 			assert(elf->load_addr);
805 			va = rounddown(elf->load_addr + seg->vaddr);
806 			assert(va >= elf->max_addr);
807 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
808 			if (res)
809 				err(res, "sys_map_zi");
810 
811 			copy_remapped_to(elf, seg);
812 			elf->max_addr = va + num_bytes;
813 		} else {
814 			uint32_t flags =  0;
815 			size_t filesz = seg->filesz;
816 			size_t memsz = seg->memsz;
817 			size_t offset = seg->offset;
818 			size_t vaddr = seg->vaddr;
819 
820 			if (offset < elf->max_offs) {
821 				/*
822 				 * We're in a load segment which overlaps
823 				 * with (or is covered by) the first page
824 				 * of a shared library.
825 				 */
826 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
827 					size_t num_bytes = 0;
828 
829 					/*
830 					 * If this segment is completely
831 					 * covered, take next.
832 					 */
833 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
834 						continue;
835 
836 					/*
837 					 * All data of the segment is
838 					 * loaded, but we need to zero
839 					 * extend it.
840 					 */
841 					va = elf->max_addr;
842 					num_bytes = roundup(vaddr + memsz) -
843 						    roundup(vaddr) -
844 						    SMALL_PAGE_SIZE;
845 					assert(num_bytes);
846 					res = sys_map_zi(num_bytes, 0, &va, 0,
847 							 0);
848 					if (res)
849 						err(res, "sys_map_zi");
850 					elf->max_addr = roundup(va + num_bytes);
851 					continue;
852 				}
853 
854 				/* Partial overlap, remove the first page. */
855 				vaddr += SMALL_PAGE_SIZE;
856 				filesz -= SMALL_PAGE_SIZE;
857 				memsz -= SMALL_PAGE_SIZE;
858 				offset += SMALL_PAGE_SIZE;
859 			}
860 
861 			if (!elf->load_addr) {
862 				va = 0;
863 				pad_begin = get_pad_begin();
864 				/*
865 				 * If mapping with pad_begin fails we'll
866 				 * retry without pad_begin, effectively
867 				 * disabling ASLR for the current ELF file.
868 				 */
869 			} else {
870 				va = vaddr + elf->load_addr;
871 				pad_begin = 0;
872 			}
873 
874 			if (seg->flags & PF_W)
875 				flags |= LDELF_MAP_FLAG_WRITEABLE;
876 			else
877 				flags |= LDELF_MAP_FLAG_SHAREABLE;
878 			if (seg->flags & PF_X)
879 				flags |= LDELF_MAP_FLAG_EXECUTABLE;
880 			if (!(seg->flags & PF_R))
881 				err(TEE_ERROR_NOT_SUPPORTED,
882 				    "Segment must be readable");
883 			if (flags & LDELF_MAP_FLAG_WRITEABLE) {
884 				res = sys_map_zi(memsz, 0, &va, pad_begin,
885 						 pad_end);
886 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
887 					res = sys_map_zi(memsz, 0, &va, 0,
888 							 pad_end);
889 				if (res)
890 					err(res, "sys_map_zi");
891 				res = sys_copy_from_ta_bin((void *)va, filesz,
892 							   elf->handle, offset);
893 				if (res)
894 					err(res, "sys_copy_from_ta_bin");
895 			} else {
896 				if (filesz != memsz)
897 					err(TEE_ERROR_BAD_FORMAT,
898 					    "Filesz and memsz mismatch");
899 				res = sys_map_ta_bin(&va, filesz, flags,
900 						     elf->handle, offset,
901 						     pad_begin, pad_end);
902 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
903 					res = sys_map_ta_bin(&va, filesz, flags,
904 							     elf->handle,
905 							     offset, 0,
906 							     pad_end);
907 				if (res)
908 					err(res, "sys_map_ta_bin");
909 			}
910 
911 			if (!elf->load_addr)
912 				elf->load_addr = va;
913 			elf->max_addr = roundup(va + memsz);
914 			elf->max_offs += filesz;
915 		}
916 	}
917 }
918 
919 static void ta_elf_add_bti(struct ta_elf *elf)
920 {
921 	TEE_Result res = TEE_SUCCESS;
922 	struct segment *seg = NULL;
923 	uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI;
924 
925 	TAILQ_FOREACH(seg, &elf->segs, link) {
926 		vaddr_t va = elf->load_addr + seg->vaddr;
927 
928 		if (seg->flags & PF_X) {
929 			res = sys_set_prot(va, seg->memsz, flags);
930 			if (res)
931 				err(res, "sys_set_prot");
932 		}
933 	}
934 }
935 
936 static void parse_property_segment(struct ta_elf *elf)
937 {
938 	char *desc = NULL;
939 	size_t align = elf->prop_align;
940 	size_t desc_offset = 0;
941 	size_t prop_offset = 0;
942 	vaddr_t va = 0;
943 	Elf_Note *note = NULL;
944 	char *name = NULL;
945 
946 	if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
947 		return;
948 
949 	check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
950 			    elf->prop_memsz);
951 
952 	va = elf->load_addr + elf->prop_start;
953 	note = (void *)va;
954 	name = (char *)(note + 1);
955 
956 	if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
957 		return;
958 
959 	if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
960 	    note->n_namesz != sizeof(ELF_NOTE_GNU) ||
961 	    memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
962 	    !IS_POWER_OF_TWO(align))
963 		return;
964 
965 	desc_offset = ROUNDUP2(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);
966 
967 	if (desc_offset > elf->prop_memsz ||
968 	    ROUNDUP2(desc_offset + note->n_descsz, align) > elf->prop_memsz)
969 		return;
970 
971 	desc = (char *)(va + desc_offset);
972 
973 	do {
974 		Elf_Prop *prop = (void *)(desc + prop_offset);
975 		size_t data_offset = prop_offset + sizeof(*prop);
976 
977 		if (note->n_descsz < data_offset)
978 			return;
979 
980 		data_offset = confine_array_index(data_offset, note->n_descsz);
981 
982 		if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
983 			uint32_t *pr_data = (void *)(desc + data_offset);
984 
985 			if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
986 			    prop->pr_datasz != sizeof(*pr_data))
987 				return;
988 
989 			if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
990 				DMSG("BTI Feature present in note property");
991 				elf->bti_enabled = true;
992 			}
993 		}
994 
995 		prop_offset += ROUNDUP2(sizeof(*prop) + prop->pr_datasz, align);
996 	} while (prop_offset < note->n_descsz);
997 }
998 
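/*
 * Parse and adjust the load segments, then, if the first segment starts in
 * the first page of the binary (the shared library case), remap the
 * initially mapped page to a region large enough for the whole ELF.
 */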
999 static void map_segments(struct ta_elf *elf)
1000 {
1001 	TEE_Result res = TEE_SUCCESS;
1002 
1003 	parse_load_segments(elf);
1004 	adjust_segments(elf);
1005 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
1006 		vaddr_t va = 0;
1007 		size_t sz = elf->max_addr - elf->load_addr;
1008 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
1009 		size_t pad_begin = get_pad_begin();
1010 
1011 		/*
1012 		 * We're loading a library; if that ever changes, other
1013 		 * parts of the code need to be updated too.
1014 		 */
1015 		assert(!elf->is_main);
1016 
1017 		/*
1018 		 * Now that we know how much virtual memory is needed move
1019 		 * the already mapped part to a location which can
1020 		 * accommodate us.
1021 		 */
1022 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
1023 				roundup(seg->vaddr + seg->memsz));
1024 		if (res == TEE_ERROR_OUT_OF_MEMORY)
1025 			res = sys_remap(elf->load_addr, &va, sz, 0,
1026 					roundup(seg->vaddr + seg->memsz));
1027 		if (res)
1028 			err(res, "sys_remap");
1029 		elf->ehdr_addr = va;
1030 		elf->load_addr = va;
1031 		elf->max_addr = va + sz;
1032 		elf->phdr = (void *)(va + elf->e_phoff);
1033 	}
1034 }
1035 
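/*
 * Scan a PT_DYNAMIC segment for DT_NEEDED entries and queue each referenced
 * dependency (identified by a UUID string) for loading.
 */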
1036 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
1037 				  vaddr_t addr, size_t memsz)
1038 {
1039 	size_t dyn_entsize = 0;
1040 	size_t num_dyns = 0;
1041 	size_t n = 0;
1042 	unsigned int tag = 0;
1043 	size_t val = 0;
1044 	TEE_UUID uuid = { };
1045 	char *str_tab = NULL;
1046 	size_t str_tab_sz = 0;
1047 
1048 	if (type != PT_DYNAMIC)
1049 		return;
1050 
1051 	check_phdr_in_range(elf, type, addr, memsz);
1052 
1053 	if (elf->is_32bit)
1054 		dyn_entsize = sizeof(Elf32_Dyn);
1055 	else
1056 		dyn_entsize = sizeof(Elf64_Dyn);
1057 
1058 	assert(!(memsz % dyn_entsize));
1059 	num_dyns = memsz / dyn_entsize;
1060 
1061 	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
1062 		read_dyn(elf, addr, n, &tag, &val);
1063 		if (tag == DT_STRTAB)
1064 			str_tab = (char *)(val + elf->load_addr);
1065 		else if (tag == DT_STRSZ)
1066 			str_tab_sz = val;
1067 	}
1068 	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);
1069 
1070 	for (n = 0; n < num_dyns; n++) {
1071 		TEE_Result res = TEE_SUCCESS;
1072 
1073 		read_dyn(elf, addr, n, &tag, &val);
1074 		if (tag != DT_NEEDED)
1075 			continue;
1076 		if (val >= str_tab_sz)
1077 			err(TEE_ERROR_BAD_FORMAT,
1078 			    "Offset into .dynstr/STRTAB out of range");
1079 		res = tee_uuid_from_str(&uuid, str_tab + val);
1080 		if (res)
1081 			err(res, "Fail to get UUID from string");
1082 		queue_elf(&uuid);
1083 	}
1084 }
1085 
1086 static void add_dependencies(struct ta_elf *elf)
1087 {
1088 	size_t n = 0;
1089 
1090 	if (elf->is_32bit) {
1091 		Elf32_Phdr *phdr = elf->phdr;
1092 
1093 		for (n = 0; n < elf->e_phnum; n++)
1094 			add_deps_from_segment(elf, phdr[n].p_type,
1095 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1096 	} else {
1097 		Elf64_Phdr *phdr = elf->phdr;
1098 
1099 		for (n = 0; n < elf->e_phnum; n++)
1100 			add_deps_from_segment(elf, phdr[n].p_type,
1101 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1102 	}
1103 }
1104 
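/*
 * Allocate a copy of the section headers, reading them from the already
 * mapped first page and/or directly from the TA binary.
 */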
1105 static void copy_section_headers(struct ta_elf *elf)
1106 {
1107 	TEE_Result res = TEE_SUCCESS;
1108 	size_t sz = 0;
1109 	size_t offs = 0;
1110 
1111 	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
1112 		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");
1113 
1114 	elf->shdr = malloc(sz);
1115 	if (!elf->shdr)
1116 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
1117 
1118 	/*
1119 	 * We're assuming that section headers come after the load segments,
1120 	 * but if it's a very small dynamically linked library the section
1121 	 * headers can still end up (partially?) in the first mapped page.
1122 	 */
1123 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
1124 		assert(!elf->is_main);
1125 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
1126 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
1127 		       offs);
1128 	}
1129 
1130 	if (offs < sz) {
1131 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
1132 					   sz - offs, elf->handle,
1133 					   elf->e_shoff + offs);
1134 		if (res)
1135 			err(res, "sys_copy_from_ta_bin");
1136 	}
1137 }
1138 
1139 static void close_handle(struct ta_elf *elf)
1140 {
1141 	TEE_Result res = sys_close_ta_bin(elf->handle);
1142 
1143 	if (res)
1144 		err(res, "sys_close_ta_bin");
1145 	elf->handle = -1;
1146 }
1147 
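/*
 * Undo a previous load attempt: unmap the ELF header and all segments and
 * reset the parsed state so the ELF can be loaded again.
 */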
1148 static void clean_elf_load_main(struct ta_elf *elf)
1149 {
1150 	TEE_Result res = TEE_SUCCESS;
1151 
1152 	/*
1153 	 * Clean up from last attempt to load
1154 	 */
1155 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
1156 	if (res)
1157 		err(res, "sys_unmap");
1158 
1159 	while (!TAILQ_EMPTY(&elf->segs)) {
1160 		struct segment *seg = TAILQ_FIRST(&elf->segs);
1161 		vaddr_t va = 0;
1162 		size_t num_bytes = 0;
1163 
1164 		va = rounddown(elf->load_addr + seg->vaddr);
1165 		if (seg->remapped_writeable)
1166 			num_bytes = roundup(seg->vaddr + seg->memsz) -
1167 				    rounddown(seg->vaddr);
1168 		else
1169 			num_bytes = seg->memsz;
1170 
1171 		res = sys_unmap(va, num_bytes);
1172 		if (res)
1173 			err(res, "sys_unmap");
1174 
1175 		TAILQ_REMOVE(&elf->segs, seg, link);
1176 		free(seg);
1177 	}
1178 
1179 	free(elf->shdr);
1180 	memset(&elf->is_32bit, 0,
1181 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
1182 
1183 	TAILQ_INIT(&elf->segs);
1184 }
1185 
1186 #ifdef ARM64
1187 /*
1188  * Allocates an offset in the TA's Thread Control Block for the TLS segment of
1189  * the @elf module.
1190  */
1191 #define TCB_HEAD_SIZE (2 * sizeof(long))
1192 static void set_tls_offset(struct ta_elf *elf)
1193 {
1194 	static size_t next_offs = TCB_HEAD_SIZE;
1195 
1196 	if (!elf->tls_start)
1197 		return;
1198 
1199 	/* Module has a TLS segment */
1200 	elf->tls_tcb_offs = next_offs;
1201 	next_offs += elf->tls_memsz;
1202 }
1203 #else
1204 static void set_tls_offset(struct ta_elf *elf __unused) {}
1205 #endif
1206 
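/*
 * Load the main TA: map and populate its segments, record dependencies and
 * symbols, and redo the load with the legacy scheme if ta_head indicates a
 * legacy TA.
 */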
1207 static void load_main(struct ta_elf *elf)
1208 {
1209 	vaddr_t va = 0;
1210 
1211 	init_elf(elf);
1212 	map_segments(elf);
1213 	populate_segments(elf);
1214 	add_dependencies(elf);
1215 	copy_section_headers(elf);
1216 	save_symtab(elf);
1217 	close_handle(elf);
1218 	set_tls_offset(elf);
1219 	parse_property_segment(elf);
1220 	if (elf->bti_enabled)
1221 		ta_elf_add_bti(elf);
1222 
1223 	if (!ta_elf_resolve_sym("ta_head", &va, NULL, elf))
1224 		elf->head = (struct ta_head *)va;
1225 	else
1226 		elf->head = (struct ta_head *)elf->load_addr;
1227 	if (elf->head->depr_entry != UINT64_MAX) {
1228 		/*
1229 		 * Legacy TAs set their entry point in ta_head. Non-legacy
1230 		 * TAs use the ELF entry point instead and leave the ta_head
1231 		 * entry point set to UINT64_MAX to indicate that it's not
1232 		 * used.
1233 		 *
1234 		 * NB, everything before commit a73b5878c89d ("Replace
1235 		 * ta_head.entry with elf entry") is considered a legacy TA
1236 		 * by ldelf.
1237 		 *
1238 		 * Legacy TAs cannot be mapped with shared memory segments,
1239 		 * so restart the mapping if it turns out we're loading a
1240 		 * legacy TA.
1241 		 */
1242 
1243 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
1244 		clean_elf_load_main(elf);
1245 		elf->is_legacy = true;
1246 		init_elf(elf);
1247 		map_segments(elf);
1248 		populate_segments_legacy(elf);
1249 		add_dependencies(elf);
1250 		copy_section_headers(elf);
1251 		save_symtab(elf);
1252 		close_handle(elf);
1253 		elf->head = (struct ta_head *)elf->load_addr;
1254 		/*
1255 		 * Check that the TA is still a legacy TA, if it isn't give
1256 		 * up now since we're likely under attack.
1257 		 */
1258 		if (elf->head->depr_entry == UINT64_MAX)
1259 			err(TEE_ERROR_GENERIC,
1260 			    "TA %pUl was changed on disk to non-legacy",
1261 			    (void *)&elf->uuid);
1262 	}
1263 
1264 }
1265 
1266 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
1267 		      uint32_t *ta_flags)
1268 {
1269 	struct ta_elf *elf = queue_elf(uuid);
1270 	vaddr_t va = 0;
1271 	TEE_Result res = TEE_SUCCESS;
1272 
1273 	assert(elf);
1274 	elf->is_main = true;
1275 
1276 	load_main(elf);
1277 
1278 	*is_32bit = elf->is_32bit;
1279 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
1280 	if (res)
1281 		err(res, "sys_map_zi stack");
1282 
1283 	if (elf->head->flags & ~TA_FLAGS_MASK)
1284 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flags(s) %#"PRIx32,
1285 		    elf->head->flags & ~TA_FLAGS_MASK);
1286 
1287 	*ta_flags = elf->head->flags;
1288 	*sp = va + elf->head->stack_size;
1289 	ta_stack = va;
1290 	ta_stack_size = elf->head->stack_size;
1291 }
1292 
1293 void ta_elf_finalize_load_main(uint64_t *entry, uint64_t *load_addr)
1294 {
1295 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
1296 	TEE_Result res = TEE_SUCCESS;
1297 
1298 	assert(elf->is_main);
1299 
1300 	res = ta_elf_set_init_fini_info_compat(elf->is_32bit);
1301 	if (res)
1302 		err(res, "ta_elf_set_init_fini_info_compat");
1303 	res = ta_elf_set_elf_phdr_info(elf->is_32bit);
1304 	if (res)
1305 		err(res, "ta_elf_set_elf_phdr_info");
1306 
1307 	if (elf->is_legacy)
1308 		*entry = elf->head->depr_entry;
1309 	else
1310 		*entry = elf->e_entry + elf->load_addr;
1311 
1312 	*load_addr = elf->load_addr;
1313 }
1314 
1315 
1316 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
1317 {
1318 	if (elf->is_main)
1319 		return;
1320 
1321 	init_elf(elf);
1322 	if (elf->is_32bit != is_32bit)
1323 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
1324 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1325 		    is_32bit ? "32" : "64");
1326 
1327 	map_segments(elf);
1328 	populate_segments(elf);
1329 	add_dependencies(elf);
1330 	copy_section_headers(elf);
1331 	save_symtab(elf);
1332 	close_handle(elf);
1333 	set_tls_offset(elf);
1334 	parse_property_segment(elf);
1335 	if (elf->bti_enabled)
1336 		ta_elf_add_bti(elf);
1337 }
1338 
1339 void ta_elf_finalize_mappings(struct ta_elf *elf)
1340 {
1341 	TEE_Result res = TEE_SUCCESS;
1342 	struct segment *seg = NULL;
1343 
1344 	if (!elf->is_legacy)
1345 		return;
1346 
1347 	TAILQ_FOREACH(seg, &elf->segs, link) {
1348 		vaddr_t va = elf->load_addr + seg->vaddr;
1349 		uint32_t flags =  0;
1350 
1351 		if (seg->flags & PF_W)
1352 			flags |= LDELF_MAP_FLAG_WRITEABLE;
1353 		if (seg->flags & PF_X)
1354 			flags |= LDELF_MAP_FLAG_EXECUTABLE;
1355 
1356 		res = sys_set_prot(va, seg->memsz, flags);
1357 		if (res)
1358 			err(res, "sys_set_prot");
1359 	}
1360 }
1361 
1362 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1363 					 const char *fmt, ...)
1364 {
1365 	va_list ap;
1366 
1367 	va_start(ap, fmt);
1368 	print_func(pctx, fmt, ap);
1369 	va_end(ap);
1370 }
1371 
1372 static void print_seg(void *pctx, print_func_t print_func,
1373 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1374 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1375 		      size_t sz __maybe_unused, uint32_t flags)
1376 {
1377 	int rc __maybe_unused = 0;
1378 	int width __maybe_unused = 8;
1379 	char desc[14] __maybe_unused = "";
1380 	char flags_str[] __maybe_unused = "----";
1381 
1382 	if (elf_idx > -1) {
1383 		rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1384 		assert(rc >= 0);
1385 	} else {
1386 		if (flags & DUMP_MAP_EPHEM) {
1387 			rc = snprintf(desc, sizeof(desc), " (param)");
1388 			assert(rc >= 0);
1389 		}
1390 		if (flags & DUMP_MAP_LDELF) {
1391 			rc = snprintf(desc, sizeof(desc), " (ldelf)");
1392 			assert(rc >= 0);
1393 		}
1394 		if (va == ta_stack) {
1395 			rc = snprintf(desc, sizeof(desc), " (stack)");
1396 			assert(rc >= 0);
1397 		}
1398 	}
1399 
1400 	if (flags & DUMP_MAP_READ)
1401 		flags_str[0] = 'r';
1402 	if (flags & DUMP_MAP_WRITE)
1403 		flags_str[1] = 'w';
1404 	if (flags & DUMP_MAP_EXEC)
1405 		flags_str[2] = 'x';
1406 	if (flags & DUMP_MAP_SECURE)
1407 		flags_str[3] = 's';
1408 
1409 	print_wrapper(pctx, print_func,
1410 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1411 		      idx, width, va, width, pa, sz, flags_str, desc);
1412 }
1413 
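/*
 * Advance to the next segment, visiting the ELFs in ascending load address
 * order. Returns false when all segments have been visited.
 */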
1414 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1415 			      struct ta_elf **elf, struct segment **seg,
1416 			      size_t *elf_idx)
1417 {
1418 	struct ta_elf *e = NULL;
1419 	struct segment *s = NULL;
1420 	size_t idx = 0;
1421 	vaddr_t va = 0;
1422 	struct ta_elf *e2 = NULL;
1423 	size_t i2 = 0;
1424 
1425 	assert(elf && seg && elf_idx);
1426 	e = *elf;
1427 	s = *seg;
1428 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1429 
1430 	if (s) {
1431 		s = TAILQ_NEXT(s, link);
1432 		if (s) {
1433 			*seg = s;
1434 			return true;
1435 		}
1436 	}
1437 
1438 	if (e)
1439 		va = e->load_addr;
1440 
1441 	/* Find the ELF with next load address */
1442 	e = NULL;
1443 	TAILQ_FOREACH(e2, elf_queue, link) {
1444 		if (e2->load_addr > va) {
1445 			if (!e || e2->load_addr < e->load_addr) {
1446 				e = e2;
1447 				idx = i2;
1448 			}
1449 		}
1450 		i2++;
1451 	}
1452 	if (!e)
1453 		return false;
1454 
1455 	*elf = e;
1456 	*seg = TAILQ_FIRST(&e->segs);
1457 	*elf_idx = idx;
1458 	return true;
1459 }
1460 
1461 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1462 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1463 			   struct dump_map *maps, vaddr_t mpool_base)
1464 {
1465 	struct segment *seg = NULL;
1466 	struct ta_elf *elf = NULL;
1467 	size_t elf_idx = 0;
1468 	size_t idx = 0;
1469 	size_t map_idx = 0;
1470 
1471 	/*
1472 	 * Loop over all segments and maps, printing them in virtual address
1473 	 * order. A segment takes priority if its virtual address is present
1474 	 * in both a map and a segment.
1475 	 */
1476 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1477 	while (true) {
1478 		vaddr_t va = -1;
1479 		paddr_t pa = -1;
1480 		size_t sz = 0;
1481 		uint32_t flags = DUMP_MAP_SECURE;
1482 
1483 		if (seg) {
1484 			va = rounddown(seg->vaddr + elf->load_addr);
1485 			sz = roundup(seg->vaddr + seg->memsz) -
1486 				     rounddown(seg->vaddr);
1487 		}
1488 
1489 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1490 			uint32_t f = 0;
1491 
1492 			/* If there's a match, it should be the same map */
1493 			if (maps[map_idx].va == va) {
1494 				pa = maps[map_idx].pa;
1495 				/*
1496 				 * In shared libraries the first page is
1497 				 * mapped separately with the rest of that
1498 				 * segment following back to back in a
1499 				 * separate entry.
1500 				 */
1501 				if (map_idx + 1 < num_maps &&
1502 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1503 					vaddr_t next_va = maps[map_idx].va +
1504 							  maps[map_idx].sz;
1505 					size_t comb_sz = maps[map_idx].sz +
1506 							 maps[map_idx + 1].sz;
1507 
1508 					if (next_va == maps[map_idx + 1].va &&
1509 					    comb_sz == sz &&
1510 					    maps[map_idx].flags ==
1511 					    maps[map_idx + 1].flags) {
1512 						/* Skip this and next entry */
1513 						map_idx += 2;
1514 						continue;
1515 					}
1516 				}
1517 				assert(maps[map_idx].sz == sz);
1518 			} else if (maps[map_idx].va < va) {
1519 				if (maps[map_idx].va == mpool_base)
1520 					f |= DUMP_MAP_LDELF;
1521 				print_seg(pctx, print_func, idx, -1,
1522 					  maps[map_idx].va, maps[map_idx].pa,
1523 					  maps[map_idx].sz,
1524 					  maps[map_idx].flags | f);
1525 				idx++;
1526 			}
1527 			map_idx++;
1528 		}
1529 
1530 		if (!seg)
1531 			break;
1532 
1533 		if (seg->flags & PF_R)
1534 			flags |= DUMP_MAP_READ;
1535 		if (seg->flags & PF_W)
1536 			flags |= DUMP_MAP_WRITE;
1537 		if (seg->flags & PF_X)
1538 			flags |= DUMP_MAP_EXEC;
1539 
1540 		print_seg(pctx, print_func, idx, elf_idx, va, pa, sz, flags);
1541 		idx++;
1542 
1543 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1544 			seg = NULL;
1545 	}
1546 
1547 	elf_idx = 0;
1548 	TAILQ_FOREACH(elf, elf_queue, link) {
1549 		print_wrapper(pctx, print_func,
1550 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1551 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1552 		elf_idx++;
1553 	}
1554 }
1555 
1556 #ifdef CFG_UNWIND
1557 
1558 #if defined(ARM32) || defined(ARM64)
1559 /* Called by libunw */
1560 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end)
1561 {
1562 	struct segment *seg = NULL;
1563 	struct ta_elf *elf = NULL;
1564 	vaddr_t a = 0;
1565 
1566 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1567 		if (addr < elf->load_addr)
1568 			continue;
1569 		a = addr - elf->load_addr;
1570 		TAILQ_FOREACH(seg, &elf->segs, link) {
1571 			if (a < seg->vaddr)
1572 				continue;
1573 			if (a - seg->vaddr < seg->filesz) {
1574 				*idx_start = elf->exidx_start + elf->load_addr;
1575 				*idx_end = elf->exidx_start + elf->load_addr +
1576 					   elf->exidx_size;
1577 				return true;
1578 			}
1579 		}
1580 	}
1581 
1582 	return false;
1583 }
1584 
1585 void ta_elf_stack_trace_a32(uint32_t regs[16])
1586 {
1587 	struct unwind_state_arm32 state = { };
1588 
1589 	memcpy(state.registers, regs, sizeof(state.registers));
1590 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1591 }
1592 
1593 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1594 {
1595 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1596 
1597 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1598 }
1599 #elif defined(RV32) || defined(RV64)
1600 void ta_elf_stack_trace_riscv(uint64_t fp, uint64_t pc)
1601 {
1602 	struct unwind_state_riscv state = { .fp = fp, .pc = pc };
1603 
1604 	print_stack_riscv(&state, ta_stack, ta_stack_size);
1605 }
1606 #endif
1607 
1608 #endif /* CFG_UNWIND */
1609 
1610 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1611 {
1612 	TEE_Result res = TEE_ERROR_GENERIC;
1613 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1614 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1615 	struct ta_elf *elf = NULL;
1616 
1617 	if (lib)
1618 		return TEE_SUCCESS; /* Already mapped */
1619 
1620 	lib = queue_elf_helper(uuid);
1621 	if (!lib)
1622 		return TEE_ERROR_OUT_OF_MEMORY;
1623 
1624 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1625 		ta_elf_load_dependency(elf, ta->is_32bit);
1626 
1627 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1628 		ta_elf_relocate(elf);
1629 		ta_elf_finalize_mappings(elf);
1630 	}
1631 
1632 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1633 		DMSG("ELF (%pUl) at %#"PRIxVA,
1634 		     (void *)&elf->uuid, elf->load_addr);
1635 
1636 	res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
1637 	if (res)
1638 		return res;
1639 
1640 	return ta_elf_set_elf_phdr_info(ta->is_32bit);
1641 }
1642 
1643 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1644 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1645 				vaddr_t addr, size_t memsz, vaddr_t *init,
1646 				size_t *init_cnt, vaddr_t *fini,
1647 				size_t *fini_cnt)
1648 {
1649 	size_t addrsz = 0;
1650 	size_t dyn_entsize = 0;
1651 	size_t num_dyns = 0;
1652 	size_t n = 0;
1653 	unsigned int tag = 0;
1654 	size_t val = 0;
1655 
1656 	assert(type == PT_DYNAMIC);
1657 
1658 	check_phdr_in_range(elf, type, addr, memsz);
1659 
1660 	if (elf->is_32bit) {
1661 		dyn_entsize = sizeof(Elf32_Dyn);
1662 		addrsz = 4;
1663 	} else {
1664 		dyn_entsize = sizeof(Elf64_Dyn);
1665 		addrsz = 8;
1666 	}
1667 
1668 	assert(!(memsz % dyn_entsize));
1669 	num_dyns = memsz / dyn_entsize;
1670 
1671 	for (n = 0; n < num_dyns; n++) {
1672 		read_dyn(elf, addr, n, &tag, &val);
1673 		if (tag == DT_INIT_ARRAY)
1674 			*init = val + elf->load_addr;
1675 		else if (tag == DT_FINI_ARRAY)
1676 			*fini = val + elf->load_addr;
1677 		else if (tag == DT_INIT_ARRAYSZ)
1678 			*init_cnt = val / addrsz;
1679 		else if (tag == DT_FINI_ARRAYSZ)
1680 			*fini_cnt = val / addrsz;
1681 	}
1682 }
1683 
1684 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1685 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1686 				    size_t *init_cnt, vaddr_t *fini,
1687 				    size_t *fini_cnt)
1688 {
1689 	size_t n = 0;
1690 
1691 	if (elf->is_32bit) {
1692 		Elf32_Phdr *phdr = elf->phdr;
1693 
1694 		for (n = 0; n < elf->e_phnum; n++) {
1695 			if (phdr[n].p_type == PT_DYNAMIC) {
1696 				get_init_fini_array(elf, phdr[n].p_type,
1697 						    phdr[n].p_vaddr,
1698 						    phdr[n].p_memsz,
1699 						    init, init_cnt, fini,
1700 						    fini_cnt);
1701 				return;
1702 			}
1703 		}
1704 	} else {
1705 		Elf64_Phdr *phdr = elf->phdr;
1706 
1707 		for (n = 0; n < elf->e_phnum; n++) {
1708 			if (phdr[n].p_type == PT_DYNAMIC) {
1709 				get_init_fini_array(elf, phdr[n].p_type,
1710 						    phdr[n].p_vaddr,
1711 						    phdr[n].p_memsz,
1712 						    init, init_cnt, fini,
1713 						    fini_cnt);
1714 				return;
1715 			}
1716 		}
1717 	}
1718 }
1719 
1720 /*
1721  * Deprecated by __elf_phdr_info below. Kept for compatibility.
1722  *
1723  * Pointers to ELF initialization and finalization functions are extracted by
1724  * ldelf and stored on the TA heap, then exported to the TA via the global
1725  * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism.
1726  */
1727 
1728 struct __init_fini {
1729 	uint32_t flags;
1730 	uint16_t init_size;
1731 	uint16_t fini_size;
1732 
1733 	void (**init)(void); /* @init_size entries */
1734 	void (**fini)(void); /* @fini_size entries */
1735 };
1736 
1737 #define __IFS_VALID            BIT(0)
1738 #define __IFS_INIT_HAS_RUN     BIT(1)
1739 #define __IFS_FINI_HAS_RUN     BIT(2)
1740 
1741 struct __init_fini_info {
1742 	uint32_t reserved;
1743 	uint16_t size;
1744 	uint16_t pad;
1745 	struct __init_fini *ifs; /* @size entries */
1746 };
1747 
1748 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */
1749 
1750 struct __init_fini32 {
1751 	uint32_t flags;
1752 	uint16_t init_size;
1753 	uint16_t fini_size;
1754 	uint32_t init;
1755 	uint32_t fini;
1756 };
1757 
1758 struct __init_fini_info32 {
1759 	uint32_t reserved;
1760 	uint16_t size;
1761 	uint16_t pad;
1762 	uint32_t ifs;
1763 };
1764 
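/*
 * (Re)allocate the __init_fini array of the TA's __init_fini_info at @va to
 * hold @cnt entries, zero-initializing any new entries.
 */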
1765 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1766 {
1767 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1768 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1769 	struct __init_fini32 *ifs32 = NULL;
1770 	struct __init_fini *ifs = NULL;
1771 	size_t prev_cnt = 0;
1772 	void *ptr = NULL;
1773 
1774 	if (is_32bit) {
1775 		ptr = (void *)(vaddr_t)info32->ifs;
1776 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1777 		if (!ptr)
1778 			return TEE_ERROR_OUT_OF_MEMORY;
1779 		ifs32 = ptr;
1780 		prev_cnt = info32->size;
1781 		if (cnt > prev_cnt)
1782 			memset(ifs32 + prev_cnt, 0,
1783 			       (cnt - prev_cnt) * sizeof(*ifs32));
1784 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1785 		info32->size = cnt;
1786 	} else {
1787 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1788 		if (!ptr)
1789 			return TEE_ERROR_OUT_OF_MEMORY;
1790 		ifs = ptr;
1791 		prev_cnt = info->size;
1792 		if (cnt > prev_cnt)
1793 			memset(ifs + prev_cnt, 0,
1794 			       (cnt - prev_cnt) * sizeof(*ifs));
1795 		info->ifs = ifs;
1796 		info->size = cnt;
1797 	}
1798 
1799 	return TEE_SUCCESS;
1800 }
1801 
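/* Fill entry @idx of __init_fini_info with @elf's .init_array/.fini_array */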
1802 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1803 {
1804 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1805 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1806 	struct __init_fini32 *ifs32 = NULL;
1807 	struct __init_fini *ifs = NULL;
1808 	size_t init_cnt = 0;
1809 	size_t fini_cnt = 0;
1810 	vaddr_t init = 0;
1811 	vaddr_t fini = 0;
1812 
1813 	if (is_32bit) {
1814 		assert(idx < info32->size);
1815 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1816 
1817 		if (ifs32->flags & __IFS_VALID)
1818 			return;
1819 
1820 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1821 					&fini_cnt);
1822 
1823 		ifs32->init = (uint32_t)init;
1824 		ifs32->init_size = init_cnt;
1825 
1826 		ifs32->fini = (uint32_t)fini;
1827 		ifs32->fini_size = fini_cnt;
1828 
1829 		ifs32->flags |= __IFS_VALID;
1830 	} else {
1831 		assert(idx < info->size);
1832 		ifs = &info->ifs[idx];
1833 
1834 		if (ifs->flags & __IFS_VALID)
1835 			return;
1836 
1837 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1838 					&fini_cnt);
1839 
1840 		ifs->init = (void (**)(void))init;
1841 		ifs->init_size = init_cnt;
1842 
1843 		ifs->fini = (void (**)(void))fini;
1844 		ifs->fini_size = fini_cnt;
1845 
1846 		ifs->flags |= __IFS_VALID;
1847 	}
1848 }
1849 
1850 /*
1851  * Set or update __init_fini_info in the TA with information from the ELF
1852  * queue
1853  */
1854 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
1855 {
1856 	struct __init_fini_info *info = NULL;
1857 	TEE_Result res = TEE_SUCCESS;
1858 	struct ta_elf *elf = NULL;
1859 	vaddr_t info_va = 0;
1860 	size_t cnt = 0;
1861 
1862 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
1863 	if (res) {
1864 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1865 			/*
1866 			 * Not an error, only TAs linked against libutee from
1867 			 * OP-TEE 3.9.0 have this symbol.
1868 			 */
1869 			return TEE_SUCCESS;
1870 		}
1871 		return res;
1872 	}
1873 	assert(info_va);
1874 
1875 	info = (struct __init_fini_info *)info_va;
1876 	if (info->reserved)
1877 		return TEE_ERROR_NOT_SUPPORTED;
1878 
1879 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1880 		cnt++;
1881 
1882 	/* Queue has at least one file (main) */
1883 	assert(cnt);
1884 
1885 	res = realloc_ifs(info_va, cnt, is_32bit);
1886 	if (res)
1887 		goto err;
1888 
1889 	cnt = 0;
1890 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1891 		fill_ifs(info_va, cnt, elf, is_32bit);
1892 		cnt++;
1893 	}
1894 
1895 	return TEE_SUCCESS;
1896 err:
1897 	free(info);
1898 	return res;
1899 }
1900 
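/*
 * (Re)allocate the dl_phdr_info array of the TA's __elf_phdr_info at @va to
 * hold @cnt entries, zero-initializing any new entries.
 */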
1901 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
1902 {
1903 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1904 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1905 	struct dl_phdr_info32 *dlpi32 = NULL;
1906 	struct dl_phdr_info *dlpi = NULL;
1907 	size_t prev_cnt = 0;
1908 	void *ptr = NULL;
1909 
1910 	if (is_32bit) {
1911 		ptr = (void *)(vaddr_t)info32->dlpi;
1912 		ptr = realloc(ptr, cnt * sizeof(*dlpi32));
1913 		if (!ptr)
1914 			return TEE_ERROR_OUT_OF_MEMORY;
1915 		dlpi32 = ptr;
1916 		prev_cnt = info32->count;
1917 		if (cnt > prev_cnt)
1918 			memset(dlpi32 + prev_cnt, 0,
1919 			       (cnt - prev_cnt) * sizeof(*dlpi32));
1920 		info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
1921 		info32->count = cnt;
1922 	} else {
1923 		ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
1924 		if (!ptr)
1925 			return TEE_ERROR_OUT_OF_MEMORY;
1926 		dlpi = ptr;
1927 		prev_cnt = info->count;
1928 		if (cnt > prev_cnt)
1929 			memset(dlpi + prev_cnt, 0,
1930 			       (cnt - prev_cnt) * sizeof(*dlpi));
1931 		info->dlpi = dlpi;
1932 		info->count = cnt;
1933 	}
1934 
1935 	return TEE_SUCCESS;
1936 }
1937 
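/* Fill entry @idx of __elf_phdr_info with the program header info of @elf */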
1938 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
1939 			       bool is_32bit)
1940 {
1941 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1942 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1943 	struct dl_phdr_info32 *dlpi32 = NULL;
1944 	struct dl_phdr_info *dlpi = NULL;
1945 
1946 	if (is_32bit) {
1947 		assert(idx < info32->count);
1948 		dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;
1949 
1950 		dlpi32->dlpi_addr = elf->load_addr;
1951 		if (elf->soname)
1952 			dlpi32->dlpi_name = (vaddr_t)elf->soname;
1953 		else
1954 			dlpi32->dlpi_name = (vaddr_t)&info32->zero;
1955 		dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
1956 		dlpi32->dlpi_phnum = elf->e_phnum;
1957 		dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
1958 		dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
1959 		dlpi32->dlpi_tls_modid = elf->tls_mod_id;
1960 		dlpi32->dlpi_tls_data = elf->tls_start;
1961 	} else {
1962 		assert(idx < info->count);
1963 		dlpi = info->dlpi + idx;
1964 
1965 		dlpi->dlpi_addr = elf->load_addr;
1966 		if (elf->soname)
1967 			dlpi->dlpi_name = elf->soname;
1968 		else
1969 			dlpi->dlpi_name = &info32->zero;
1970 		dlpi->dlpi_phdr = elf->phdr;
1971 		dlpi->dlpi_phnum = elf->e_phnum;
1972 		dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
1973 		dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
1974 		dlpi->dlpi_tls_modid = elf->tls_mod_id;
1975 		dlpi->dlpi_tls_data = (void *)elf->tls_start;
1976 	}
1977 }
1978 
1979 /* Set or update __elf_hdr_info in the TA with information from the ELF queue */
1980 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
1981 {
1982 	struct __elf_phdr_info *info = NULL;
1983 	TEE_Result res = TEE_SUCCESS;
1984 	struct ta_elf *elf = NULL;
1985 	vaddr_t info_va = 0;
1986 	size_t cnt = 0;
1987 
1988 	res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
1989 	if (res) {
1990 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1991 			/* Older TA */
1992 			return TEE_SUCCESS;
1993 		}
1994 		return res;
1995 	}
1996 	assert(info_va);
1997 
1998 	info = (struct __elf_phdr_info *)info_va;
1999 	if (info->reserved)
2000 		return TEE_ERROR_NOT_SUPPORTED;
2001 
2002 	TAILQ_FOREACH(elf, &main_elf_queue, link)
2003 		cnt++;
2004 
2005 	res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
2006 	if (res)
2007 		return res;
2008 
2009 	cnt = 0;
2010 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
2011 		fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
2012 		cnt++;
2013 	}
2014 
2015 	return TEE_SUCCESS;
2016 }
2017