1 /*
2  * Copyright (c) 2023 Intel Corporation
3  * Copyright (c) 2024 Arduino SA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/sys/util.h>
9 #include <zephyr/llext/elf.h>
10 #include <zephyr/llext/loader.h>
11 #include <zephyr/llext/llext.h>
12 #include <zephyr/llext/llext_internal.h>
13 #include <zephyr/kernel.h>
14 
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);
17 
18 #include <string.h>
19 
20 #include "llext_priv.h"
21 
22 /*
23  * NOTICE: Functions in this file do not clean up allocations in their error
24  * paths; instead, this is performed once and for all when leaving the parent
25  * `do_llext_load()` function. This approach consolidates memory management
26  * in a single place, simplifying error handling and reducing the risk of
27  * memory leaks.
28  *
29  * The following rationale applies:
30  *
31  * - The input `struct llext` and fields in `struct loader` are zero-filled
32  *   at the beginning of the do_llext_load function, so that every pointer is
33  *   set to NULL and every bool is false.
34  * - If some function called by do_llext_load allocates memory, it does so by
35  *   immediately writing the pointer in the `ext` and `ldr` structures.
36  * - do_llext_load() will clean up the memory allocated by the functions it
37  *   calls, taking into account if the load process was successful or not.
38  */
39 
/* First four bytes of every valid ELF file: 0x7f 'E' 'L' 'F' (EI_MAG0..3) */
static const char ELF_MAGIC[] = {0x7f, 'E', 'L', 'F'};
41 
llext_loaded_sect_ptr(struct llext_loader * ldr,struct llext * ext,unsigned int sh_ndx)42 const void *llext_loaded_sect_ptr(struct llext_loader *ldr, struct llext *ext, unsigned int sh_ndx)
43 {
44 	enum llext_mem mem_idx = ldr->sect_map[sh_ndx].mem_idx;
45 
46 	if (mem_idx == LLEXT_MEM_COUNT) {
47 		return NULL;
48 	}
49 
50 	return (const uint8_t *)ext->mem[mem_idx] + ldr->sect_map[sh_ndx].offset;
51 }
52 
53 /*
54  * Load basic ELF file data
55  */
56 
/*
 * Read and validate the ELF header, then make the section header table
 * available (zero-copy via llext_peek() when the loader supports it,
 * otherwise copied to heap) and allocate the per-section map used by all
 * later loading stages.
 *
 * Returns 0 on success, -ENOEXEC for malformed or unsupported ELF data,
 * -ENOMEM on allocation failure, or an error from the loader callbacks.
 * Allocations are released by do_llext_load() on failure (see NOTICE above).
 */
static int llext_load_elf_data(struct llext_loader *ldr, struct llext *ext)
{
	int ret;

	/* read ELF header */

	ret = llext_seek(ldr, 0);
	if (ret != 0) {
		LOG_ERR("Failed to seek for ELF header");
		return ret;
	}

	ret = llext_read(ldr, &ldr->hdr, sizeof(ldr->hdr));
	if (ret != 0) {
		LOG_ERR("Failed to read ELF header");
		return ret;
	}

	/* check whether this is a valid ELF file */
	if (memcmp(ldr->hdr.e_ident, ELF_MAGIC, sizeof(ELF_MAGIC)) != 0) {
		LOG_HEXDUMP_ERR(ldr->hdr.e_ident, 16, "Invalid ELF, magic does not match");
		return -ENOEXEC;
	}

	/* Only relocatable objects and shared libraries are supported */
	switch (ldr->hdr.e_type) {
	case ET_REL:
		LOG_DBG("Loading relocatable ELF");
		break;

	case ET_DYN:
		LOG_DBG("Loading shared ELF");
		break;

	default:
		LOG_ERR("Unsupported ELF file type %x", ldr->hdr.e_type);
		return -ENOEXEC;
	}

	/*
	 * Read all ELF section headers and initialize maps.  Buffers allocated
	 * below are freed when leaving do_llext_load(); the section map is
	 * counted in alloc_size and subtracted again when it is released by
	 * llext_free_inspection_data().
	 */

	/* Reject files whose section header layout does not match elf_shdr_t */
	if (ldr->hdr.e_shentsize != sizeof(elf_shdr_t)) {
		LOG_ERR("Invalid section header size %d", ldr->hdr.e_shentsize);
		return -ENOEXEC;
	}

	ext->sect_cnt = ldr->hdr.e_shnum;

	size_t sect_map_sz = ext->sect_cnt * sizeof(ldr->sect_map[0]);

	ldr->sect_map = llext_alloc_data(sect_map_sz);
	if (!ldr->sect_map) {
		LOG_ERR("Failed to allocate section map, size %zu", sect_map_sz);
		return -ENOMEM;
	}
	ext->alloc_size += sect_map_sz;
	/* Mark every section as unmapped until llext_map_sections() runs */
	for (int i = 0; i < ext->sect_cnt; i++) {
		ldr->sect_map[i].mem_idx = LLEXT_MEM_COUNT;
		ldr->sect_map[i].offset = 0;
	}

	/* Prefer a zero-copy view of the section headers when available */
	ext->sect_hdrs = (elf_shdr_t *)llext_peek(ldr, ldr->hdr.e_shoff);
	if (ext->sect_hdrs) {
		ext->sect_hdrs_on_heap = false;
	} else {
		/* Loader cannot peek: copy the headers to a heap buffer */
		size_t sect_hdrs_sz = ext->sect_cnt * sizeof(ext->sect_hdrs[0]);

		ext->sect_hdrs_on_heap = true;
		ext->sect_hdrs = llext_alloc_data(sect_hdrs_sz);
		if (!ext->sect_hdrs) {
			LOG_ERR("Failed to allocate section headers, size %zu", sect_hdrs_sz);
			return -ENOMEM;
		}

		ret = llext_seek(ldr, ldr->hdr.e_shoff);
		if (ret != 0) {
			LOG_ERR("Failed to seek for section headers");
			return ret;
		}

		ret = llext_read(ldr, ext->sect_hdrs, sect_hdrs_sz);
		if (ret != 0) {
			LOG_ERR("Failed to read section headers");
			return ret;
		}
	}

	return 0;
}
149 
150 /*
151  * Find all relevant string and symbol tables
152  */
/*
 * Scan the section headers to locate the three tables needed for loading:
 * the symbol table (SHT_SYMTAB for ET_REL, SHT_DYNSYM for ET_DYN), the
 * section-name string table (index e_shstrndx) and the symbol-name string
 * table (referenced by the symbol table's sh_link).
 *
 * NOTE(review): strtab_ndx only becomes known once the symbol table has
 * been seen, so this single pass assumes the symbol string table appears
 * at a higher section index than the symbol table itself — confirm this
 * holds for all supported toolchains.
 *
 * Returns 0 on success, -ENOEXEC if any of the three tables is missing.
 */
static int llext_find_tables(struct llext_loader *ldr, struct llext *ext)
{
	int table_cnt, i;
	int shstrtab_ndx = ldr->hdr.e_shstrndx;
	int strtab_ndx = -1;

	memset(ldr->sects, 0, sizeof(ldr->sects));

	/* Find symbol and string tables; stop once all three are found */
	for (i = 0, table_cnt = 0; i < ext->sect_cnt && table_cnt < 3; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;

		LOG_DBG("section %d at %#zx: name %d, type %d, flags %#zx, "
			"addr %#zx, align %#zx, size %zd, link %d, info %d",
			i,
			(size_t)shdr->sh_offset,
			shdr->sh_name,
			shdr->sh_type,
			(size_t)shdr->sh_flags,
			(size_t)shdr->sh_addr,
			(size_t)shdr->sh_addralign,
			(size_t)shdr->sh_size,
			shdr->sh_link,
			shdr->sh_info);

		if (shdr->sh_type == SHT_SYMTAB && ldr->hdr.e_type == ET_REL) {
			LOG_DBG("symtab at %d", i);
			ldr->sects[LLEXT_MEM_SYMTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_SYMTAB;
			/* sh_link of the symbol table names its string table */
			strtab_ndx = shdr->sh_link;
			table_cnt++;
		} else if (shdr->sh_type == SHT_DYNSYM && ldr->hdr.e_type == ET_DYN) {
			LOG_DBG("dynsym at %d", i);
			ldr->sects[LLEXT_MEM_SYMTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_SYMTAB;
			strtab_ndx = shdr->sh_link;
			table_cnt++;
		} else if (shdr->sh_type == SHT_STRTAB && i == shstrtab_ndx) {
			LOG_DBG("shstrtab at %d", i);
			ldr->sects[LLEXT_MEM_SHSTRTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_SHSTRTAB;
			table_cnt++;
		} else if (shdr->sh_type == SHT_STRTAB && i == strtab_ndx) {
			LOG_DBG("strtab at %d", i);
			ldr->sects[LLEXT_MEM_STRTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_STRTAB;
			table_cnt++;
		}
	}

	/* sh_type stays SHT_NULL (0) for any table that was never found */
	if (!ldr->sects[LLEXT_MEM_SHSTRTAB].sh_type ||
	    !ldr->sects[LLEXT_MEM_STRTAB].sh_type ||
	    !ldr->sects[LLEXT_MEM_SYMTAB].sh_type) {
		LOG_ERR("Some sections are missing or present multiple times!");
		return -ENOEXEC;
	}

	return 0;
}
212 
/* First (bottom) and last (top) entries of a region, inclusive, for a specific field.
 * The bottom skips 'sh_info' bytes: llext_map_sections() stores the length of the
 * alignment pre-padding there, so padding does not count as region content.
 */
#define REGION_BOT(reg, field) (size_t)(reg->field + reg->sh_info)
#define REGION_TOP(reg, field) (size_t)(reg->field + reg->sh_size - 1)

/* Check if two regions x and y have any overlap on a given field. Any shared value counts. */
#define REGIONS_OVERLAP_ON(x, y, f) \
	((REGION_BOT(x, f) <= REGION_BOT(y, f) && REGION_TOP(x, f) >= REGION_BOT(y, f)) || \
	 (REGION_BOT(y, f) <= REGION_BOT(x, f) && REGION_TOP(y, f) >= REGION_BOT(x, f)))
221 
222 /*
223  * Loops through all defined ELF sections and collapses those with similar
224  * usage flags into LLEXT "regions", taking alignment constraints into account.
225  * Checks the generated regions for overlaps and calculates the offset of each
226  * section within its region.
227  *
228  * This information is stored in the ldr->sects and ldr->sect_map arrays.
229  */
static int llext_map_sections(struct llext_loader *ldr, struct llext *ext,
			      const struct llext_load_param *ldr_parm)
{
	int i, j;
	const char *name;

	/* Pass 1: assign every allocatable section to a region, growing
	 * regions as compatible sections are merged into them.
	 */
	for (i = 0; i < ext->sect_cnt; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;

		name = llext_section_name(ldr, ext, shdr);

		/* Sections already claimed by llext_find_tables() keep their mapping */
		if (ldr->sect_map[i].mem_idx != LLEXT_MEM_COUNT) {
			LOG_DBG("section %d name %s already mapped to region %d",
				i, name, ldr->sect_map[i].mem_idx);
			continue;
		}

		/* Identify the section type by its flags */
		enum llext_mem mem_idx;

		switch (shdr->sh_type) {
		case SHT_NOBITS:
			mem_idx = LLEXT_MEM_BSS;
			break;
		case SHT_PROGBITS:
			if (shdr->sh_flags & SHF_EXECINSTR) {
				mem_idx = LLEXT_MEM_TEXT;
			} else if (shdr->sh_flags & SHF_WRITE) {
				mem_idx = LLEXT_MEM_DATA;
			} else {
				mem_idx = LLEXT_MEM_RODATA;
			}
			break;
		case SHT_PREINIT_ARRAY:
			mem_idx = LLEXT_MEM_PREINIT;
			break;
		case SHT_INIT_ARRAY:
			mem_idx = LLEXT_MEM_INIT;
			break;
		case SHT_FINI_ARRAY:
			mem_idx = LLEXT_MEM_FINI;
			break;
		default:
			mem_idx = LLEXT_MEM_COUNT;
			break;
		}

		/* Special exception for .exported_sym */
		if (strcmp(name, ".exported_sym") == 0) {
			mem_idx = LLEXT_MEM_EXPORT;
		}

		/* Skip unrecognized, non-allocated or empty sections */
		if (mem_idx == LLEXT_MEM_COUNT ||
		    !(shdr->sh_flags & SHF_ALLOC) ||
		    shdr->sh_size == 0) {
			LOG_DBG("section %d name %s skipped", i, name);
			continue;
		}

		switch (mem_idx) {
		case LLEXT_MEM_PREINIT:
		case LLEXT_MEM_INIT:
		case LLEXT_MEM_FINI:
			/* Init/fini arrays must hold a whole number of pointers */
			if (shdr->sh_entsize != sizeof(void *) ||
			    shdr->sh_size % shdr->sh_entsize != 0) {
				LOG_ERR("Invalid %s array in section %d", name, i);
				return -ENOEXEC;
			}
			/* fallthrough */
		default:
			break;
		}

		LOG_DBG("section %d name %s maps to region %d", i, name, mem_idx);

		ldr->sect_map[i].mem_idx = mem_idx;
		elf_shdr_t *region = ldr->sects + mem_idx;

		/*
		 * Some applications may require specific ELF sections to not
		 * be included in their default memory regions; e.g. the ELF
		 * file may contain executable sections that are designed to be
		 * placed in slower memory. Don't merge such sections into main
		 * regions.
		 */
		if (ldr_parm->section_detached && ldr_parm->section_detached(shdr)) {
			if (mem_idx == LLEXT_MEM_TEXT &&
			    !INSTR_FETCHABLE(llext_peek(ldr, shdr->sh_offset), shdr->sh_size)) {
#ifdef CONFIG_ARC
				LOG_ERR("ELF buffer's detached text section %s not in instruction "
					"memory: %p-%p",
					name, (void *)(llext_peek(ldr, shdr->sh_offset)),
					(void *)((char *)llext_peek(ldr, shdr->sh_offset) +
						 shdr->sh_size));
				return -ENOEXEC;
#else
				LOG_WRN("Unknown if ELF buffer's detached text section %s is in "
					"instruction memory; proceeding...",
					name);
#endif
			}
			continue;
		}

		if (region->sh_type == SHT_NULL) {
			/* First section of this type, copy all info to the
			 * region descriptor.
			 */
			memcpy(region, shdr, sizeof(*region));
			continue;
		}

		/* Make sure this section is compatible with the existing region */
		if ((shdr->sh_flags & SHF_BASIC_TYPE_MASK) !=
		    (region->sh_flags & SHF_BASIC_TYPE_MASK)) {
			LOG_ERR("Unsupported section flags %#x / %#x for %s (region %d)",
				(uint32_t)shdr->sh_flags, (uint32_t)region->sh_flags,
				name, mem_idx);
			return -ENOEXEC;
		}

		/* Check if this region type is extendable */
		switch (mem_idx) {
		case LLEXT_MEM_BSS:
			/* SHT_NOBITS sections cannot be merged properly:
			 * as they use no space in the file, the logic
			 * below does not work; they must be treated as
			 * independent entities.
			 */
			LOG_ERR("Multiple SHT_NOBITS sections are not supported");
			return -ENOTSUP;
		case LLEXT_MEM_PREINIT:
		case LLEXT_MEM_INIT:
		case LLEXT_MEM_FINI:
			/* These regions are not extendable and must be
			 * referenced at most once in the ELF file.
			 */
			LOG_ERR("Region %d redefined", mem_idx);
			return -ENOEXEC;
		default:
			break;
		}

		if (ldr->hdr.e_type == ET_DYN) {
			/* In shared objects, sh_addr is the VMA.
			 * Before merging this section in the region,
			 * make sure the delta in VMAs matches that of
			 * file offsets.
			 */
			if (shdr->sh_addr - region->sh_addr !=
			    shdr->sh_offset - region->sh_offset) {
				LOG_ERR("Incompatible section addresses for %s (region %d)",
					name, mem_idx);
				return -ENOEXEC;
			}
		}

		/*
		 * Extend the current region to include the new section
		 * (overlaps are detected later)
		 */
		size_t address = MIN(region->sh_addr, shdr->sh_addr);
		size_t bot_ofs = MIN(region->sh_offset, shdr->sh_offset);
		size_t top_ofs = MAX(region->sh_offset + region->sh_size,
				     shdr->sh_offset + shdr->sh_size);
		size_t addralign = MAX(region->sh_addralign, shdr->sh_addralign);

		region->sh_addr = address;
		region->sh_offset = bot_ofs;
		region->sh_size = top_ofs - bot_ofs;
		region->sh_addralign = addralign;
	}

	/*
	 * Make sure each of the mapped sections satisfies its alignment
	 * requirement when placed in the region.
	 *
	 * The ELF standard already guarantees that each section's offset in
	 * the file satisfies its own alignment, and since only powers of 2 can
	 * be specified, a solution satisfying the largest alignment will also
	 * work for any smaller one. Aligning the ELF region to the largest
	 * requirement among the contained sections will then guarantee that
	 * all are properly aligned.
	 *
	 * However, adjusting the region's start address will make the region
	 * appear larger than it actually is, and might even make it overlap
	 * with others. To allow for further precise adjustments, the length of
	 * the calculated pre-padding area is stored in the 'sh_info' field of
	 * the region descriptor, which is not used on any SHF_ALLOC section.
	 */
	for (i = 0; i < LLEXT_MEM_COUNT; i++) {
		elf_shdr_t *region = ldr->sects + i;

		if (region->sh_type == SHT_NULL || region->sh_size == 0) {
			/* Skip empty regions */
			continue;
		}

		/* NOTE(review): this mask assumes sh_addralign is a nonzero
		 * power of two; an addralign of 0 ("no constraint" per the
		 * ELF spec) would wrap the mask — confirm inputs guarantee
		 * a nonzero value here.
		 */
		size_t prepad = region->sh_offset & (region->sh_addralign - 1);

		if (ldr->hdr.e_type == ET_DYN) {
			/* Only shared files populate sh_addr fields */
			if (prepad > region->sh_addr) {
				LOG_ERR("Bad section alignment in region %d", i);
				return -ENOEXEC;
			}

			region->sh_addr -= prepad;
		}
		region->sh_offset -= prepad;
		region->sh_size += prepad;
		region->sh_info = prepad;
	}

	/*
	 * Test that no computed region overlaps. This can happen if sections of
	 * different llext_mem type are interleaved in the ELF file or in VMAs.
	 */
	for (i = 0; i < LLEXT_MEM_COUNT; i++) {
		for (j = i+1; j < LLEXT_MEM_COUNT; j++) {
			elf_shdr_t *x = ldr->sects + i;
			elf_shdr_t *y = ldr->sects + j;

			if (x->sh_type == SHT_NULL || x->sh_size == 0 ||
			    y->sh_type == SHT_NULL || y->sh_size == 0) {
				/* Skip empty regions */
				continue;
			}

			/*
			 * The export symbol table may be surrounded by
			 * other data sections. Ignore overlaps in that
			 * case.
			 * NOTE(review): this check is subsumed by the
			 * broader LLEXT_MEM_EXPORT exemption just below.
			 */
			if ((i == LLEXT_MEM_DATA || i == LLEXT_MEM_RODATA) &&
			    j == LLEXT_MEM_EXPORT) {
				continue;
			}

			/*
			 * Exported symbols region can also overlap
			 * with rodata.
			 */
			if (i == LLEXT_MEM_EXPORT || j == LLEXT_MEM_EXPORT) {
				continue;
			}

			if ((ldr->hdr.e_type == ET_DYN) &&
			    (x->sh_flags & SHF_ALLOC) && (y->sh_flags & SHF_ALLOC)) {
				/*
				 * Test regions that have VMA ranges for overlaps
				 */
				if (REGIONS_OVERLAP_ON(x, y, sh_addr)) {
					LOG_ERR("Region %d VMA range (%#zx-%#zx) "
						"overlaps with %d (%#zx-%#zx)",
						i, REGION_BOT(x, sh_addr), REGION_TOP(x, sh_addr),
						j, REGION_BOT(y, sh_addr), REGION_TOP(y, sh_addr));
					return -ENOEXEC;
				}
			}

			/*
			 * Test file offsets. BSS sections store no
			 * data in the file and must not be included
			 * in checks to avoid false positives.
			 */
			if (i == LLEXT_MEM_BSS || j == LLEXT_MEM_BSS) {
				continue;
			}

			if (REGIONS_OVERLAP_ON(x, y, sh_offset)) {
				LOG_ERR("Region %d ELF file range (%#zx-%#zx) "
					"overlaps with %d (%#zx-%#zx)",
					i, REGION_BOT(x, sh_offset), REGION_TOP(x, sh_offset),
					j, REGION_BOT(y, sh_offset), REGION_TOP(y, sh_offset));
				return -ENOEXEC;
			}
		}
	}

	/*
	 * Calculate each ELF section's offset inside its memory region. This
	 * is done as a separate pass so the final regions are already defined.
	 * Also mark the regions that include relocation targets.
	 */
	for (i = 0; i < ext->sect_cnt; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;
		enum llext_mem mem_idx = ldr->sect_map[i].mem_idx;

		/* For relocation sections, sh_info names the target section */
		if (shdr->sh_type == SHT_REL || shdr->sh_type == SHT_RELA) {
			enum llext_mem target_region = ldr->sect_map[shdr->sh_info].mem_idx;

			if (target_region != LLEXT_MEM_COUNT) {
				ldr->sects[target_region].sh_flags |= SHF_LLEXT_HAS_RELOCS;
			}
		}

		if (mem_idx != LLEXT_MEM_COUNT) {
			ldr->sect_map[i].offset = shdr->sh_offset - ldr->sects[mem_idx].sh_offset;
		}
	}

	return 0;
}
533 
llext_count_export_syms(struct llext_loader * ldr,struct llext * ext)534 static int llext_count_export_syms(struct llext_loader *ldr, struct llext *ext)
535 {
536 	size_t ent_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_entsize;
537 	size_t syms_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_size;
538 	int sym_cnt = syms_size / sizeof(elf_sym_t);
539 	const char *name;
540 	elf_sym_t sym;
541 	int i, ret;
542 	size_t pos;
543 
544 	LOG_DBG("symbol count %u", sym_cnt);
545 
546 	ext->sym_tab.sym_cnt = 0;
547 	for (i = 0, pos = ldr->sects[LLEXT_MEM_SYMTAB].sh_offset;
548 	     i < sym_cnt;
549 	     i++, pos += ent_size) {
550 		if (!i) {
551 			/* A dummy entry */
552 			continue;
553 		}
554 
555 		ret = llext_seek(ldr, pos);
556 		if (ret != 0) {
557 			return ret;
558 		}
559 
560 		ret = llext_read(ldr, &sym, ent_size);
561 		if (ret != 0) {
562 			return ret;
563 		}
564 
565 		uint32_t stt = ELF_ST_TYPE(sym.st_info);
566 		uint32_t stb = ELF_ST_BIND(sym.st_info);
567 		uint32_t sect = sym.st_shndx;
568 
569 		name = llext_symbol_name(ldr, ext, &sym);
570 
571 		if ((stt == STT_FUNC || stt == STT_OBJECT) && stb == STB_GLOBAL) {
572 			LOG_DBG("function symbol %d, name %s, type tag %d, bind %d, sect %d",
573 				i, name, stt, stb, sect);
574 			ext->sym_tab.sym_cnt++;
575 		} else {
576 			LOG_DBG("unhandled symbol %d, name %s, type tag %d, bind %d, sect %d",
577 				i, name, stt, stb, sect);
578 		}
579 	}
580 
581 	return 0;
582 }
583 
llext_allocate_symtab(struct llext_loader * ldr,struct llext * ext)584 static int llext_allocate_symtab(struct llext_loader *ldr, struct llext *ext)
585 {
586 	struct llext_symtable *sym_tab = &ext->sym_tab;
587 	size_t syms_size = sym_tab->sym_cnt * sizeof(struct llext_symbol);
588 
589 	sym_tab->syms = llext_alloc_data(syms_size);
590 	if (!sym_tab->syms) {
591 		return -ENOMEM;
592 	}
593 	memset(sym_tab->syms, 0, syms_size);
594 	ext->alloc_size += syms_size;
595 
596 	return 0;
597 }
598 
llext_export_symbols(struct llext_loader * ldr,struct llext * ext,const struct llext_load_param * ldr_parm)599 static int llext_export_symbols(struct llext_loader *ldr, struct llext *ext,
600 				const struct llext_load_param *ldr_parm)
601 {
602 	struct llext_symtable *exp_tab = &ext->exp_tab;
603 	struct llext_symbol *sym;
604 	unsigned int i;
605 
606 	if (IS_ENABLED(CONFIG_LLEXT_IMPORT_ALL_GLOBALS)) {
607 		/* Use already discovered global symbols */
608 		exp_tab->sym_cnt = ext->sym_tab.sym_cnt;
609 		sym = ext->sym_tab.syms;
610 	} else {
611 		/* Only use symbols in the .exported_sym section */
612 		exp_tab->sym_cnt = ldr->sects[LLEXT_MEM_EXPORT].sh_size
613 				   / sizeof(struct llext_symbol);
614 		sym = ext->mem[LLEXT_MEM_EXPORT];
615 	}
616 
617 	if (!exp_tab->sym_cnt) {
618 		/* No symbols exported */
619 		return 0;
620 	}
621 
622 	exp_tab->syms = llext_alloc_data(exp_tab->sym_cnt * sizeof(struct llext_symbol));
623 	if (!exp_tab->syms) {
624 		return -ENOMEM;
625 	}
626 
627 	for (i = 0; i < exp_tab->sym_cnt; i++, sym++) {
628 		/*
629 		 * Offsets in objects, built for pre-defined addresses have to
630 		 * be translated to memory locations for symbol name access
631 		 * during dependency resolution.
632 		 */
633 		const char *name = NULL;
634 
635 		if (ldr_parm->pre_located) {
636 			ssize_t name_offset = llext_file_offset(ldr, (uintptr_t)sym->name);
637 
638 			if (name_offset > 0) {
639 				name = llext_peek(ldr, name_offset);
640 			}
641 		}
642 		if (!name) {
643 			name = sym->name;
644 		}
645 
646 		exp_tab->syms[i].name = name;
647 		exp_tab->syms[i].addr = sym->addr;
648 		LOG_DBG("sym %p name %s", sym->addr, sym->name);
649 	}
650 
651 	return 0;
652 }
653 
/*
 * Second pass over the ELF symbol table: fill in ext->sym_tab (pre-sized by
 * llext_count_export_syms / llext_allocate_symtab) with the name and
 * resolved address of every defined global function/object symbol.
 *
 * NOTE(review): each entry is read into a fixed-size elf_sym_t using the
 * table's sh_entsize — assumes sh_entsize == sizeof(elf_sym_t); confirm
 * validation happens before this point.
 *
 * Returns 0 on success, -ENOTSUP if a symbol's section has no loadable
 * data, or an error from the loader callbacks.
 */
static int llext_copy_symbols(struct llext_loader *ldr, struct llext *ext,
			      const struct llext_load_param *ldr_parm)
{
	size_t ent_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_entsize;
	size_t syms_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_size;
	int sym_cnt = syms_size / sizeof(elf_sym_t);
	struct llext_symtable *sym_tab = &ext->sym_tab;
	elf_sym_t sym;
	int i, j, ret;
	size_t pos;

	for (i = 0, pos = ldr->sects[LLEXT_MEM_SYMTAB].sh_offset, j = 0;
	     i < sym_cnt;
	     i++, pos += ent_size) {
		if (!i) {
			/* A dummy entry */
			continue;
		}

		ret = llext_seek(ldr, pos);
		if (ret != 0) {
			return ret;
		}

		ret = llext_read(ldr, &sym, ent_size);
		if (ret != 0) {
			return ret;
		}

		uint32_t stt = ELF_ST_TYPE(sym.st_info);
		uint32_t stb = ELF_ST_BIND(sym.st_info);
		unsigned int shndx = sym.st_shndx;

		/* Only defined (non-SHN_UNDEF) global functions/objects are kept */
		if ((stt == STT_FUNC || stt == STT_OBJECT) &&
		    stb == STB_GLOBAL && shndx != SHN_UNDEF) {
			const char *name = llext_symbol_name(ldr, ext, &sym);

			__ASSERT(j <= sym_tab->sym_cnt, "Miscalculated symbol number %u\n", j);

			sym_tab->syms[j].name = name;

			elf_shdr_t *shdr = ext->sect_hdrs + shndx;
			uintptr_t section_addr = shdr->sh_addr;

			if (ldr_parm->pre_located &&
			    (!ldr_parm->section_detached || !ldr_parm->section_detached(shdr))) {
				/* Pre-located: symbols stay at their link-time
				 * addresses. For ET_REL objects st_value is
				 * section-relative, so add the section address.
				 */
				sym_tab->syms[j].addr = (uint8_t *)sym.st_value +
					(ldr->hdr.e_type == ET_REL ? section_addr : 0);
			} else {
				const void *base;

				base = llext_loaded_sect_ptr(ldr, ext, shndx);
				if (!base) {
					/* If the section is not mapped, try to peek.
					 * Be noisy about it, since this is addressing
					 * data that was missed by llext_map_sections.
					 */
					base = llext_peek(ldr, shdr->sh_offset);
					if (base) {
						LOG_DBG("section %d peeked at %p", shndx, base);
					} else {
						LOG_ERR("No data for section %d", shndx);
						return -ENOTSUP;
					}
				}

				/* Relocated into memory: st_value is relative to
				 * the section for ET_REL, to the load VMA for
				 * ET_DYN (hence the section_addr subtraction).
				 */
				sym_tab->syms[j].addr = (uint8_t *)base + sym.st_value -
					(ldr->hdr.e_type == ET_REL ? 0 : section_addr);
			}

			LOG_DBG("function symbol %d name %s addr %p",
				j, name, sym_tab->syms[j].addr);
			j++;
		}
	}

	return 0;
}
732 
733 /*
734  * Load a valid ELF as an extension
735  */
/*
 * Runs the full loading pipeline on a prepared loader: header parsing,
 * table discovery, section mapping, region/string/symbol copying, local
 * linking and symbol export. Returns 0 on success or a negative errno;
 * on failure, all allocations made during loading are released here
 * (see the NOTICE at the top of this file).
 */
int do_llext_load(struct llext_loader *ldr, struct llext *ext,
		  const struct llext_load_param *ldr_parm)
{
	const struct llext_load_param default_ldr_parm = LLEXT_LOAD_PARAM_DEFAULT;
	int ret;

	/* A NULL ldr_parm selects the default loading parameters */
	if (!ldr_parm) {
		ldr_parm = &default_ldr_parm;
	}

	/* Zero all memory that is affected by the loading process
	 * (see the NOTICE at the top of this file).
	 */
	memset(ext, 0, sizeof(*ext));
	ldr->sect_map = NULL;

	LOG_DBG("Loading ELF data...");
	ret = llext_prepare(ldr);
	if (ret != 0) {
		LOG_ERR("Failed to prepare the loader, ret %d", ret);
		goto out;
	}

	ret = llext_load_elf_data(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to load basic ELF data, ret %d", ret);
		goto out;
	}

	LOG_DBG("Finding ELF tables...");
	ret = llext_find_tables(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to find important ELF tables, ret %d", ret);
		goto out;
	}

	LOG_DBG("Allocate and copy strings...");
	ret = llext_copy_strings(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to copy ELF string sections, ret %d", ret);
		goto out;
	}

	LOG_DBG("Mapping ELF sections...");
	ret = llext_map_sections(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to map ELF sections, ret %d", ret);
		goto out;
	}

	LOG_DBG("Allocate and copy regions...");
	ret = llext_copy_regions(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to copy regions, ret %d", ret);
		goto out;
	}

	LOG_DBG("Counting exported symbols...");
	ret = llext_count_export_syms(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to count exported ELF symbols, ret %d", ret);
		goto out;
	}

	LOG_DBG("Allocating memory for symbol table...");
	ret = llext_allocate_symtab(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to allocate extension symbol table, ret %d", ret);
		goto out;
	}

	LOG_DBG("Copying symbols...");
	ret = llext_copy_symbols(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to copy symbols, ret %d", ret);
		goto out;
	}

	/* Local relocation can be skipped by the caller (relocate_local) */
	if (ldr_parm->relocate_local) {
		LOG_DBG("Linking ELF...");
		ret = llext_link(ldr, ext, ldr_parm);
		if (ret != 0) {
			LOG_ERR("Failed to link, ret %d", ret);
			goto out;
		}
	}

	ret = llext_export_symbols(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to export, ret %d", ret);
		goto out;
	}

	/* Pre-located extensions keep the buffer's original permissions */
	if (!ldr_parm->pre_located) {
		llext_adjust_mmu_permissions(ext);
	}

out:
	/*
	 * Free resources only used during loading, unless explicitly requested.
	 * Note that this exploits the fact that freeing a NULL pointer has no effect.
	 */

	if (ret != 0 || !ldr_parm->keep_section_info) {
		llext_free_inspection_data(ldr, ext);
	}

	/* Until proper inter-llext linking is implemented, the symbol table is
	 * not useful outside of the loading process; keep it only if debugging
	 * is enabled and no error is detected.
	 */
	if (!(IS_ENABLED(CONFIG_LLEXT_LOG_LEVEL_DBG) && ret == 0)) {
		llext_free(ext->sym_tab.syms);
		ext->sym_tab.sym_cnt = 0;
		ext->sym_tab.syms = NULL;
	}

	if (ret != 0) {
		LOG_DBG("Failed to load extension: %d", ret);

		/* Since the loading process failed, free the resources that
		 * were allocated for the lifetime of the extension as well,
		 * such as regions and exported symbols.
		 */
		llext_free_regions(ext);
		llext_free(ext->exp_tab.syms);
		ext->exp_tab.sym_cnt = 0;
		ext->exp_tab.syms = NULL;
	} else {
		LOG_DBG("Loaded llext: %zu bytes in heap, .text at %p, .rodata at %p",
			ext->alloc_size, ext->mem[LLEXT_MEM_TEXT], ext->mem[LLEXT_MEM_RODATA]);
	}

	/* Always release loader-side resources, success or failure */
	llext_finalize(ldr);

	return ret;
}
873 
llext_free_inspection_data(struct llext_loader * ldr,struct llext * ext)874 int llext_free_inspection_data(struct llext_loader *ldr, struct llext *ext)
875 {
876 	if (ldr->sect_map) {
877 		ext->alloc_size -= ext->sect_cnt * sizeof(ldr->sect_map[0]);
878 		llext_free(ldr->sect_map);
879 		ldr->sect_map = NULL;
880 	}
881 
882 	return 0;
883 }
884