/*
 * Copyright (c) 2023 Intel Corporation
 * Copyright (c) 2024 Arduino SA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/util.h>
#include <zephyr/llext/loader.h>
#include <zephyr/llext/llext.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);

#include <string.h>

#include "llext_priv.h"

#ifdef CONFIG_MMU_PAGE_SIZE
#define LLEXT_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
#elif CONFIG_ARC_MPU_VER == 2
#define LLEXT_PAGE_SIZE 2048
#else
/* Arm and non-v2 ARC MPUs want a 32 byte minimum MPU region */
#define LLEXT_PAGE_SIZE 32
#endif

#ifdef CONFIG_LLEXT_HEAP_DYNAMIC
#ifdef CONFIG_HARVARD
struct k_heap llext_instr_heap;
struct k_heap llext_data_heap;
#else
struct k_heap llext_heap;
#endif
bool llext_heap_inited;
#else
#ifdef CONFIG_HARVARD
Z_HEAP_DEFINE_IN_SECT(llext_instr_heap, (CONFIG_LLEXT_INSTR_HEAP_SIZE * KB(1)),
		      __attribute__((section(".rodata.llext_instr_heap"))));
Z_HEAP_DEFINE_IN_SECT(llext_data_heap, (CONFIG_LLEXT_DATA_HEAP_SIZE * KB(1)),
		      __attribute__((section(".data.llext_data_heap"))));
#else
K_HEAP_DEFINE(llext_heap, CONFIG_LLEXT_HEAP_SIZE * KB(1));
#endif
#endif
/*
 * Initialize the memory partition associated with the specified memory region
 */
static void llext_init_mem_part(struct llext *ext, enum llext_mem mem_idx,
			uintptr_t start, size_t len)
{
#ifdef CONFIG_USERSPACE
	if (mem_idx < LLEXT_MEM_PARTITIONS) {
		ext->mem_parts[mem_idx].start = start;
		ext->mem_parts[mem_idx].size = len;

		switch (mem_idx) {
		case LLEXT_MEM_TEXT:
			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RX_U_RX;
			break;
		case LLEXT_MEM_DATA:
		case LLEXT_MEM_BSS:
			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RW_U_RW;
			break;
		case LLEXT_MEM_RODATA:
			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RO_U_RO;
			break;
		default:
			break;
		}
	}
#endif

	LOG_DBG("region %d: start %#zx, size %zd", mem_idx, (size_t)start, len);
}

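/*
 * Copy or map a single region into its final location.
 *
 * Depending on the loader storage type and the region flags, this either
 * reuses the ELF buffer directly (via llext_peek()) or allocates a suitably
 * sized and aligned block from the LLEXT heap and copies the region data
 * into it.
 */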
static int llext_copy_region(struct llext_loader *ldr, struct llext *ext,
			      enum llext_mem mem_idx, const struct llext_load_param *ldr_parm)
{
	int ret;
	elf_shdr_t *region = ldr->sects + mem_idx;
	uintptr_t region_alloc = region->sh_size;
	uintptr_t region_align = region->sh_addralign;

	if (!region_alloc) {
		return 0;
	}
	ext->mem_size[mem_idx] = region_alloc;

	/*
	 * Calculate the minimum region size and alignment that can satisfy
	 * MMU/MPU requirements. This only applies to regions that contain
	 * program-accessible data (not to string tables, for example).
	 */
	if (region->sh_flags & SHF_ALLOC) {
		if (IS_ENABLED(CONFIG_ARM_MPU) || IS_ENABLED(CONFIG_ARC_MPU)) {
			/* On Arm and ARC MPUs, regions must be sized and
			 * aligned to the same power of two, no smaller than
			 * LLEXT_PAGE_SIZE.
			 */
			uintptr_t block_sz = MAX(MAX(region_alloc, region_align), LLEXT_PAGE_SIZE);

			block_sz = 1 << LOG2CEIL(block_sz); /* align to next power of two */
			region_alloc = block_sz;
			region_align = block_sz;
		} else if (IS_ENABLED(CONFIG_MMU)) {
			/* MMU targets map memory in page-sized chunks. Round
			 * the region up to a multiple of the page size.
			 */
			region_alloc = ROUND_UP(region_alloc, LLEXT_PAGE_SIZE);
			region_align = MAX(region_align, LLEXT_PAGE_SIZE);
		}
	}

	if (ldr->storage == LLEXT_STORAGE_WRITABLE ||           /* writable storage         */
	    (ldr->storage == LLEXT_STORAGE_PERSISTENT &&        /* || persistent storage    */
	     !(region->sh_flags & SHF_WRITE) &&                 /*    && read-only region   */
	     !(region->sh_flags & SHF_LLEXT_HAS_RELOCS))) {     /*    && no relocs to apply */
		/*
		 * Try to reuse data areas from the ELF buffer, if possible.
		 * If any of the following tests fail, a normal allocation
		 * will be attempted.
		 */
		if (region->sh_type != SHT_NOBITS) {
			/* Region has data in the file, check if peek() is supported */
			ext->mem[mem_idx] = llext_peek(ldr, region->sh_offset);
			if (ext->mem[mem_idx]) {
				if ((IS_ALIGNED(ext->mem[mem_idx], region_align) ||
				     ldr_parm->pre_located) &&
				    ((mem_idx != LLEXT_MEM_TEXT) ||
				     INSTR_FETCHABLE(ext->mem[mem_idx], region_alloc))) {
					/* Map this region directly to the ELF buffer */
					llext_init_mem_part(ext, mem_idx,
							    (uintptr_t)ext->mem[mem_idx],
							    region_alloc);
					ext->mem_on_heap[mem_idx] = false;
					return 0;
				}

				if ((mem_idx == LLEXT_MEM_TEXT) &&
				    !INSTR_FETCHABLE(ext->mem[mem_idx], region_alloc)) {
					LOG_WRN("Cannot reuse ELF buffer for region %d, not "
						"instruction memory: %p-%p",
						mem_idx, ext->mem[mem_idx],
						(void *)((uintptr_t)(ext->mem[mem_idx]) +
							 region->sh_size));
				}
				if (!IS_ALIGNED(ext->mem[mem_idx], region_align)) {
					LOG_WRN("Cannot peek region %d: %p not aligned to %#zx",
						mem_idx, ext->mem[mem_idx], (size_t)region_align);
				}
			}
		} else if (ldr_parm->pre_located) {
			/*
			 * In pre-located files all regions, including BSS,
			 * are placed by the user with a linker script. No
			 * additional memory allocation is needed here.
			 */
			ext->mem[mem_idx] = NULL;
			ext->mem_on_heap[mem_idx] = false;
			return 0;
		}
	}

	if (ldr_parm->pre_located) {
		/*
		 * The ELF file is supposed to be pre-located, but some
		 * regions are not accessible or not in the correct place.
		 */
		return -EFAULT;
	}

	/* Allocate a suitably aligned area for the region. */
	if (region->sh_flags & SHF_EXECINSTR) {
		ext->mem[mem_idx] = llext_aligned_alloc_instr(region_align, region_alloc);
	} else {
		ext->mem[mem_idx] = llext_aligned_alloc_data(region_align, region_alloc);
	}

	if (!ext->mem[mem_idx]) {
		LOG_ERR("Failed allocating %zd bytes %zd-aligned for region %d",
			(size_t)region_alloc, (size_t)region_align, mem_idx);
		return -ENOMEM;
	}

	ext->alloc_size += region_alloc;

	llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
		region_alloc);

	if (region->sh_type == SHT_NOBITS) {
		memset(ext->mem[mem_idx], 0, region->sh_size);
	} else {
		uintptr_t base = (uintptr_t)ext->mem[mem_idx];
		size_t offset = region->sh_offset;
		size_t length = region->sh_size;

		if (region->sh_flags & SHF_ALLOC) {
			/* zero out any prepad bytes, which are not part of the data area */
			size_t prepad = region->sh_info;

			memset((void *)base, 0, prepad);
			base += prepad;
			offset += prepad;
			length -= prepad;
		}

		/* actual data area without prepad bytes */
		ret = llext_seek(ldr, offset);
		if (ret != 0) {
			goto err;
		}

		ret = llext_read(ldr, (void *)base, length);
		if (ret != 0) {
			goto err;
		}
	}

	ext->mem_on_heap[mem_idx] = true;

	return 0;

err:
	llext_free(ext->mem[mem_idx]);
	ext->mem[mem_idx] = NULL;
	return ret;
}

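/*
 * Copy the section-header and symbol string tables first, so that section
 * and symbol names are available for the rest of the load process.
 */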
int llext_copy_strings(struct llext_loader *ldr, struct llext *ext,
		       const struct llext_load_param *ldr_parm)
{
	int ret = llext_copy_region(ldr, ext, LLEXT_MEM_SHSTRTAB, ldr_parm);

	if (!ret) {
		ret = llext_copy_region(ldr, ext, LLEXT_MEM_STRTAB, ldr_parm);
	}

	return ret;
}

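/*
 * Copy all remaining regions, skipping any that are already populated
 * (such as the string tables handled by llext_copy_strings()). With debug
 * logging enabled, also print per-section load addresses in a form usable
 * with gdb's add-symbol-file command.
 */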
int llext_copy_regions(struct llext_loader *ldr, struct llext *ext,
		       const struct llext_load_param *ldr_parm)
{
	for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_COUNT; mem_idx++) {
		/* strings have already been copied */
		if (ext->mem[mem_idx]) {
			continue;
		}

		int ret = llext_copy_region(ldr, ext, mem_idx, ldr_parm);

		if (ret < 0) {
			return ret;
		}
	}

	if (IS_ENABLED(CONFIG_LLEXT_LOG_LEVEL_DBG)) {
		LOG_DBG("gdb add-symbol-file flags:");
		for (int i = 0; i < ext->sect_cnt; ++i) {
			elf_shdr_t *shdr = ext->sect_hdrs + i;
			enum llext_mem mem_idx = ldr->sect_map[i].mem_idx;
			const char *name = llext_section_name(ldr, ext, shdr);

			/* only show sections mapped to program memory */
			if (mem_idx < LLEXT_MEM_EXPORT) {
				LOG_DBG("-s %s %#zx", name,
					(size_t)ext->mem[mem_idx] + ldr->sect_map[i].offset);
			}
		}
	}

	return 0;
}

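/*
 * On MMU targets, tighten page permissions once the extension has been
 * copied and relocated: text becomes executable and non-writable, rodata
 * becomes read-only, and the affected cache ranges are synchronized first.
 */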
void llext_adjust_mmu_permissions(struct llext *ext)
{
#ifdef CONFIG_MMU
	void *addr;
	size_t size;
	uint32_t flags;

	for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_PARTITIONS; mem_idx++) {
		addr = ext->mem[mem_idx];
		size = ROUND_UP(ext->mem_size[mem_idx], LLEXT_PAGE_SIZE);
		if (size == 0) {
			continue;
		}
		switch (mem_idx) {
		case LLEXT_MEM_TEXT:
			sys_cache_instr_invd_range(addr, size);
			flags = K_MEM_PERM_EXEC;
			break;
		case LLEXT_MEM_DATA:
		case LLEXT_MEM_BSS:
			/* memory is already K_MEM_PERM_RW by default */
			continue;
		case LLEXT_MEM_RODATA:
			flags = 0;
			break;
		default:
			continue;
		}
		sys_cache_data_flush_range(addr, size);
		k_mem_update_flags(addr, size, flags);
	}

	ext->mmu_permissions_set = true;
#endif
}

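/*
 * Release all heap-allocated memory regions owned by the extension,
 * restoring default RW page permissions first on MMU targets where
 * they were changed.
 */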
void llext_free_regions(struct llext *ext)
{
	for (int i = 0; i < LLEXT_MEM_COUNT; i++) {
#ifdef CONFIG_MMU
		if (ext->mmu_permissions_set && ext->mem_size[i] != 0 &&
		    (i == LLEXT_MEM_TEXT || i == LLEXT_MEM_RODATA)) {
			/* restore default RAM permissions of changed regions */
			k_mem_update_flags(ext->mem[i],
					   ROUND_UP(ext->mem_size[i], LLEXT_PAGE_SIZE),
					   K_MEM_PERM_RW);
		}
#endif
		if (ext->mem_on_heap[i]) {
			LOG_DBG("freeing memory region %d", i);

			if (i == LLEXT_MEM_TEXT) {
				llext_free_instr(ext->mem[i]);
			} else {
				llext_free(ext->mem[i]);
			}

			ext->mem[i] = NULL;
		}
	}
}

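/*
 * Add all non-empty memory partitions of the extension to a user-space
 * memory domain so that threads in the domain may access it. Returns
 * -ENOSYS when CONFIG_USERSPACE is disabled.
 *
 * Illustrative sketch only; "app_domain", "app_thread" and the already
 * loaded extension handle "ext" are hypothetical application objects:
 *
 *   struct k_mem_domain app_domain;
 *
 *   k_mem_domain_init(&app_domain, 0, NULL);
 *   if (llext_add_domain(ext, &app_domain) == 0) {
 *       k_mem_domain_add_thread(&app_domain, app_thread);
 *   }
 */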
int llext_add_domain(struct llext *ext, struct k_mem_domain *domain)
{
#ifdef CONFIG_USERSPACE
	int ret = 0;

	for (int i = 0; i < LLEXT_MEM_PARTITIONS; i++) {
		if (ext->mem_size[i] == 0) {
			continue;
		}
		ret = k_mem_domain_add_partition(domain, &ext->mem_parts[i]);
		if (ret != 0) {
			LOG_ERR("Failed adding memory partition %d to domain %p",
				i, domain);
			return ret;
		}
	}

	return ret;
#else
	return -ENOSYS;
#endif
}

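/*
 * Provide separate caller-owned instruction and data buffers to back the
 * dynamic LLEXT heaps on Harvard architectures. Only available with
 * CONFIG_LLEXT_HEAP_DYNAMIC and CONFIG_HARVARD; returns -ENOSYS otherwise
 * and -EEXIST if a heap has already been set up.
 */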
int llext_heap_init_harvard(void *instr_mem, size_t instr_bytes, void *data_mem, size_t data_bytes)
{
#if !defined(CONFIG_LLEXT_HEAP_DYNAMIC) || !defined(CONFIG_HARVARD)
	return -ENOSYS;
#else
	if (llext_heap_inited) {
		return -EEXIST;
	}

	k_heap_init(&llext_instr_heap, instr_mem, instr_bytes);
	k_heap_init(&llext_data_heap, data_mem, data_bytes);

	llext_heap_inited = true;
	return 0;
#endif
}

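/*
 * Provide a single caller-owned buffer to back the LLEXT heap when
 * CONFIG_LLEXT_HEAP_DYNAMIC is enabled on non-Harvard builds.
 *
 * Illustrative sketch only; buffer name and size are arbitrary:
 *
 *   static uint8_t llext_buf[32 * 1024];
 *
 *   int err = llext_heap_init(llext_buf, sizeof(llext_buf));
 *   if (err != 0) {
 *       LOG_ERR("LLEXT heap init failed: %d", err);
 *   }
 */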
int llext_heap_init(void *mem, size_t bytes)
{
#if !defined(CONFIG_LLEXT_HEAP_DYNAMIC) || defined(CONFIG_HARVARD)
	return -ENOSYS;
#else
	if (llext_heap_inited) {
		return -EEXIST;
	}

	k_heap_init(&llext_heap, mem, bytes);

	llext_heap_inited = true;
	return 0;
#endif
}

#ifdef CONFIG_LLEXT_HEAP_DYNAMIC
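/* llext_iterate() callback: returns non-zero as soon as any loaded extension exists */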
static int llext_loaded(struct llext *ext, void *arg)
{
	return 1;
}
#endif

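/*
 * Detach the dynamic LLEXT heap. Fails with -EBUSY while any extension is
 * still loaded and with -EEXIST if no heap has been initialized.
 */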
int llext_heap_uninit(void)
{
#ifdef CONFIG_LLEXT_HEAP_DYNAMIC
	if (!llext_heap_inited) {
		return -EEXIST;
	}
	if (llext_iterate(llext_loaded, NULL)) {
		return -EBUSY;
	}
	llext_heap_inited = false;
	return 0;
#else
	return -ENOSYS;
#endif
}