// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, 2022 Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <kernel/ldelf_loader.h>
#include <kernel/ldelf_syscalls.h>
#include <ldelf.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <tee/arch_svc.h>

extern uint8_t ldelf_data[];
extern const unsigned int ldelf_code_size;
extern const unsigned int ldelf_data_size;
extern const unsigned int ldelf_entry;

/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif

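/*
 * Allocate backing pages for one chunk of ldelf (stack, code or data) and
 * map them into the user address space. As can be inferred from the
 * callers below: @sz is rounded up to whole pages, @prot gives the initial
 * mapping attributes and *@va is the requested (or, on return, the
 * assigned) virtual address of the mapping.
 */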
static TEE_Result alloc_and_map_ldelf_fobj(struct user_mode_ctx *uctx,
                                           size_t sz, uint32_t prot,
                                           vaddr_t *va)
{
        size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
        struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
        struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL,
                                                 TEE_MATTR_MEM_TYPE_TAGGED);
        TEE_Result res = TEE_SUCCESS;

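        /*
         * On success the mobj holds its own reference to the fobj, so the
         * allocation reference can be dropped here either way; fobj_put()
         * accepts NULL on the error path.
         */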
        fobj_put(fobj);
        if (!mobj)
                return TEE_ERROR_OUT_OF_MEMORY;
        res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE,
                     prot, VM_FLAG_LDELF, mobj, 0);
        mobj_put(mobj);

        return res;
}

/*
 * This function may leave a few mappings behind on error, but that's taken
 * care of by tee_ta_init_user_ta_session() since the entire context is
 * removed then.
 */
TEE_Result ldelf_load_ldelf(struct user_mode_ctx *uctx)
{
        TEE_Result res = TEE_SUCCESS;
        vaddr_t stack_addr = 0;
        vaddr_t code_addr = 0;
        vaddr_t rw_addr = 0;
        uint32_t prot = 0;

        uctx->is_32bit = is_arm32;

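        /*
         * Map the stack first. The stack grows downwards, so the initial
         * stack pointer is the top of the mapping.
         */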
        res = alloc_and_map_ldelf_fobj(uctx, LDELF_STACK_SIZE,
                                       TEE_MATTR_URW | TEE_MATTR_PRW,
                                       &stack_addr);
        if (res)
                return res;
        uctx->ldelf_stack_ptr = stack_addr + LDELF_STACK_SIZE;

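        /*
         * Map the code pages writable for now so the binary can be copied
         * in; the mapping is flipped to read-execute further down.
         */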
        res = alloc_and_map_ldelf_fobj(uctx, ldelf_code_size, TEE_MATTR_PRW,
                                       &code_addr);
        if (res)
                return res;
        uctx->entry_func = code_addr + ldelf_entry;

        rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);
        res = alloc_and_map_ldelf_fobj(uctx, ldelf_data_size,
                                       TEE_MATTR_URW | TEE_MATTR_PRW,
                                       &rw_addr);
        if (res)
                return res;

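        /*
         * Activate the new address space so that the copies below hit the
         * freshly created user-mode mappings.
         */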
        vm_set_ctx(uctx->ts_ctx);

        memcpy((void *)code_addr, ldelf_data, ldelf_code_size);
        memcpy((void *)rw_addr, ldelf_data + ldelf_code_size, ldelf_data_size);

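        /*
         * With the binary in place, remap the code read-execute (W^X).
         * When BTI is enabled the pages are also marked guarded, so
         * indirect branches must land on BTI instructions.
         */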
        prot = TEE_MATTR_URX;
        if (IS_ENABLED(CFG_CORE_BTI))
                prot |= TEE_MATTR_GUARDED;

        res = vm_set_prot(uctx, code_addr,
                          ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);
        if (res)
                return res;

        DMSG("ldelf load address %#"PRIxVA, code_addr);

        return TEE_SUCCESS;
}

TEE_Result ldelf_init_with_ldelf(struct ts_session *sess,
                                 struct user_mode_ctx *uctx)
{
        TEE_Result res = TEE_SUCCESS;
        struct ldelf_arg *arg = NULL;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;
        uaddr_t usr_stack = 0;

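        /*
         * Carve the argument struct out of the top of ldelf's stack; it
         * is both the input to ldelf and, once ldelf returns, the output
         * describing the loaded TA.
         */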
        usr_stack = uctx->ldelf_stack_ptr;
        usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
        arg = (struct ldelf_arg *)usr_stack;
        memset(arg, 0, sizeof(*arg));
        arg->uuid = uctx->ts_ctx->uuid;
        sess->handle_svc = ldelf_handle_svc;

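        /*
         * Run ldelf with the argument struct as first parameter; while
         * the call is in progress syscalls are routed to the ldelf
         * handler set just above.
         */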
        res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
                                     usr_stack, uctx->entry_func,
                                     is_arm32, &panicked, &panic_code);

        sess->handle_svc = sess->ctx->ops->handle_svc;
        thread_user_clear_vfp(uctx);
        ldelf_sess_cleanup(sess);

        if (panicked) {
                abort_print_current_ts();
                EMSG("ldelf panicked");
                return TEE_ERROR_GENERIC;
        }
        if (res) {
                EMSG("ldelf failed with res: %#"PRIx32, res);
                return res;
        }

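        /*
         * ldelf runs in user mode and is only partially trusted: before
         * consuming the results, verify that the argument struct still
         * lies in accessible user memory.
         */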
        res = vm_check_access_rights(uctx,
                                     TEE_MEMORY_ACCESS_READ |
                                     TEE_MEMORY_ACCESS_ANY_OWNER,
                                     (uaddr_t)arg, sizeof(*arg));
        if (res)
                return res;

        if (is_user_ta_ctx(uctx->ts_ctx)) {
                /*
                 * This is already checked by the elf loader, but since it
                 * runs in user mode we're not trusting it entirely.
                 */
                if (arg->flags & ~TA_FLAGS_MASK)
                        return TEE_ERROR_BAD_FORMAT;

                to_user_ta_ctx(uctx->ts_ctx)->ta_ctx.flags = arg->flags;
        }

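        /*
         * Record the entry points ldelf resolved; they are used for
         * entering the TA itself and by the dump/ftrace/dlopen helpers
         * below.
         */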
        uctx->is_32bit = arg->is_32bit;
        uctx->entry_func = arg->entry_func;
        uctx->stack_ptr = arg->stack_ptr;
        uctx->dump_entry_func = arg->dump_entry;
#ifdef CFG_FTRACE_SUPPORT
        uctx->ftrace_entry_func = arg->ftrace_entry;
        sess->fbuf = arg->fbuf;
#endif
        uctx->dl_entry_func = arg->dl_entry;

        return TEE_SUCCESS;
}

TEE_Result ldelf_dump_state(struct user_mode_ctx *uctx)
{
        TEE_Result res = TEE_SUCCESS;
        uaddr_t usr_stack = uctx->ldelf_stack_ptr;
        struct dump_entry_arg *arg = NULL;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;
        struct thread_specific_data *tsd = thread_get_tsd();
        struct ts_session *sess = NULL;
        struct vm_region *r = NULL;
        size_t n = 0;

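        /*
         * First pass over the regions: count the user-accessible ones so
         * we know how large the map array on ldelf's stack must be.
         */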
        TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
                if (r->attr & TEE_MATTR_URWX)
                        n++;

        usr_stack = uctx->ldelf_stack_ptr;
        usr_stack -= ROUNDUP(sizeof(*arg) + n * sizeof(struct dump_map),
                             STACK_ALIGNMENT);
        arg = (struct dump_entry_arg *)usr_stack;

        res = vm_check_access_rights(uctx,
                                     TEE_MEMORY_ACCESS_READ |
                                     TEE_MEMORY_ACCESS_ANY_OWNER,
                                     (uaddr_t)arg, sizeof(*arg));
        if (res) {
                EMSG("ldelf stack is inaccessible!");
                return res;
        }

        memset(arg, 0, sizeof(*arg) + n * sizeof(struct dump_map));

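        /*
         * Second pass: describe each user-accessible region (virtual and
         * physical address, size and access flags) for ldelf to print.
         */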
        arg->num_maps = n;
        n = 0;
        TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
                if (r->attr & TEE_MATTR_URWX) {
                        if (r->mobj)
                                mobj_get_pa(r->mobj, r->offset, 0,
                                            &arg->maps[n].pa);
                        arg->maps[n].va = r->va;
                        arg->maps[n].sz = r->size;
                        if (r->attr & TEE_MATTR_UR)
                                arg->maps[n].flags |= DUMP_MAP_READ;
                        if (r->attr & TEE_MATTR_UW)
                                arg->maps[n].flags |= DUMP_MAP_WRITE;
                        if (r->attr & TEE_MATTR_UX)
                                arg->maps[n].flags |= DUMP_MAP_EXEC;
                        if (r->attr & TEE_MATTR_SECURE)
                                arg->maps[n].flags |= DUMP_MAP_SECURE;
                        if (r->flags & VM_FLAG_EPHEMERAL)
                                arg->maps[n].flags |= DUMP_MAP_EPHEM;
                        if (r->flags & VM_FLAG_LDELF)
                                arg->maps[n].flags |= DUMP_MAP_LDELF;
                        n++;
                }
        }

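        /*
         * Hand over the register state captured at the abort so ldelf can
         * produce a meaningful call-stack dump.
         */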
        arg->is_arm32 = uctx->is_32bit;
#ifdef ARM32
        arg->arm32.regs[0] = tsd->abort_regs.r0;
        arg->arm32.regs[1] = tsd->abort_regs.r1;
        arg->arm32.regs[2] = tsd->abort_regs.r2;
        arg->arm32.regs[3] = tsd->abort_regs.r3;
        arg->arm32.regs[4] = tsd->abort_regs.r4;
        arg->arm32.regs[5] = tsd->abort_regs.r5;
        arg->arm32.regs[6] = tsd->abort_regs.r6;
        arg->arm32.regs[7] = tsd->abort_regs.r7;
        arg->arm32.regs[8] = tsd->abort_regs.r8;
        arg->arm32.regs[9] = tsd->abort_regs.r9;
        arg->arm32.regs[10] = tsd->abort_regs.r10;
        arg->arm32.regs[11] = tsd->abort_regs.r11;
        arg->arm32.regs[12] = tsd->abort_regs.ip;
        arg->arm32.regs[13] = tsd->abort_regs.usr_sp; /*SP*/
        arg->arm32.regs[14] = tsd->abort_regs.usr_lr; /*LR*/
        arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
#endif /*ARM32*/
#ifdef ARM64
        if (uctx->is_32bit) {
                arg->arm32.regs[0] = tsd->abort_regs.x0;
                arg->arm32.regs[1] = tsd->abort_regs.x1;
                arg->arm32.regs[2] = tsd->abort_regs.x2;
                arg->arm32.regs[3] = tsd->abort_regs.x3;
                arg->arm32.regs[4] = tsd->abort_regs.x4;
                arg->arm32.regs[5] = tsd->abort_regs.x5;
                arg->arm32.regs[6] = tsd->abort_regs.x6;
                arg->arm32.regs[7] = tsd->abort_regs.x7;
                arg->arm32.regs[8] = tsd->abort_regs.x8;
                arg->arm32.regs[9] = tsd->abort_regs.x9;
                arg->arm32.regs[10] = tsd->abort_regs.x10;
                arg->arm32.regs[11] = tsd->abort_regs.x11;
                arg->arm32.regs[12] = tsd->abort_regs.x12;
                arg->arm32.regs[13] = tsd->abort_regs.x13; /*SP*/
                arg->arm32.regs[14] = tsd->abort_regs.x14; /*LR*/
                arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
        } else {
                arg->arm64.fp = tsd->abort_regs.x29;
                arg->arm64.pc = tsd->abort_regs.elr;
                arg->arm64.sp = tsd->abort_regs.sp_el0;
        }
#endif /*ARM64*/

        sess = ts_get_current_session();
        sess->handle_svc = ldelf_handle_svc;

        res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
                                     usr_stack, uctx->dump_entry_func,
                                     is_arm32, &panicked, &panic_code);

        sess->handle_svc = sess->ctx->ops->handle_svc;
        thread_user_clear_vfp(uctx);
        ldelf_sess_cleanup(sess);

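        /*
         * On panic, disarm the dump entry point so a handler that has
         * just proven broken is not invoked again on a later abort.
         */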
        if (panicked) {
                uctx->dump_entry_func = 0;
                EMSG("ldelf dump function panicked");
                abort_print_current_ts();
                res = TEE_ERROR_TARGET_DEAD;
        }

        return res;
}

#ifdef CFG_FTRACE_SUPPORT
TEE_Result ldelf_dump_ftrace(struct user_mode_ctx *uctx,
                             void *buf, size_t *blen)
{
        uaddr_t usr_stack = uctx->ldelf_stack_ptr;
        TEE_Result res = TEE_SUCCESS;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;
        size_t *arg = NULL;
        struct ts_session *sess = NULL;

        if (!uctx->ftrace_entry_func)
                return TEE_ERROR_NOT_SUPPORTED;

        usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
        arg = (size_t *)usr_stack;

        res = vm_check_access_rights(uctx,
                                     TEE_MEMORY_ACCESS_READ |
                                     TEE_MEMORY_ACCESS_ANY_OWNER,
                                     (uaddr_t)arg, sizeof(*arg));
        if (res) {
                EMSG("ldelf stack is inaccessible!");
                return res;
        }

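        /*
         * The length is passed by reference: on entry *arg holds the size
         * of @buf, on return the size ldelf actually needed, which lets
         * the caller detect a short buffer and retry with a larger one.
         */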
        *arg = *blen;

        sess = ts_get_current_session();
        sess->handle_svc = ldelf_handle_svc;

        res = thread_enter_user_mode((vaddr_t)buf, (vaddr_t)arg, 0, 0,
                                     usr_stack, uctx->ftrace_entry_func,
                                     is_arm32, &panicked, &panic_code);

        sess->handle_svc = sess->ctx->ops->handle_svc;
        thread_user_clear_vfp(uctx);
        ldelf_sess_cleanup(sess);

        if (panicked) {
                uctx->ftrace_entry_func = 0;
                EMSG("ldelf ftrace function panicked");
                abort_print_current_ts();
                res = TEE_ERROR_TARGET_DEAD;
        }

        if (!res) {
                if (*arg > *blen)
                        res = TEE_ERROR_SHORT_BUFFER;
                *blen = *arg;
        }

        return res;
}
#endif /*CFG_FTRACE_SUPPORT*/

TEE_Result ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
                        uint32_t flags)
{
        uaddr_t usr_stack = uctx->ldelf_stack_ptr;
        TEE_Result res = TEE_ERROR_GENERIC;
        struct dl_entry_arg *arg = NULL;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;
        struct ts_session *sess = NULL;

        assert(uuid);

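        /*
         * Unlike the read-only checks above, the dlopen argument must be
         * writable as well: ldelf reports its status back in arg->ret.
         */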
        usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
        arg = (struct dl_entry_arg *)usr_stack;

        res = vm_check_access_rights(uctx,
                                     TEE_MEMORY_ACCESS_READ |
                                     TEE_MEMORY_ACCESS_WRITE |
                                     TEE_MEMORY_ACCESS_ANY_OWNER,
                                     (uaddr_t)arg, sizeof(*arg));
        if (res) {
                EMSG("ldelf stack is inaccessible!");
                return res;
        }

        memset(arg, 0, sizeof(*arg));
        arg->cmd = LDELF_DL_ENTRY_DLOPEN;
        arg->dlopen.uuid = *uuid;
        arg->dlopen.flags = flags;

        sess = ts_get_current_session();
        sess->handle_svc = ldelf_handle_svc;

        res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
                                     usr_stack, uctx->dl_entry_func,
                                     is_arm32, &panicked, &panic_code);

        sess->handle_svc = sess->ctx->ops->handle_svc;
        ldelf_sess_cleanup(sess);

        if (panicked) {
                EMSG("ldelf dl_entry function panicked");
                abort_print_current_ts();
                res = TEE_ERROR_TARGET_DEAD;
        }
        if (!res)
                res = arg->ret;

        return res;
}

TEE_Result ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
                       const char *sym, size_t maxlen, vaddr_t *val)
{
        uaddr_t usr_stack = uctx->ldelf_stack_ptr;
        TEE_Result res = TEE_ERROR_GENERIC;
        struct dl_entry_arg *arg = NULL;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;
        size_t len = strnlen(sym, maxlen);
        struct ts_session *sess = NULL;

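        /*
         * strnlen() returning maxlen means no NUL terminator was found
         * within the caller-supplied bound: reject the name rather than
         * risk reading past it.
         */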
        if (len == maxlen)
                return TEE_ERROR_BAD_PARAMETERS;

        usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
        arg = (struct dl_entry_arg *)usr_stack;

        res = vm_check_access_rights(uctx,
                                     TEE_MEMORY_ACCESS_READ |
                                     TEE_MEMORY_ACCESS_WRITE |
                                     TEE_MEMORY_ACCESS_ANY_OWNER,
                                     (uaddr_t)arg, sizeof(*arg) + len + 1);
        if (res) {
                EMSG("ldelf stack is inaccessible!");
                return res;
        }

        memset(arg, 0, sizeof(*arg));
        arg->cmd = LDELF_DL_ENTRY_DLSYM;
        arg->dlsym.uuid = *uuid;
        memcpy(arg->dlsym.symbol, sym, len);
        arg->dlsym.symbol[len] = '\0';

        sess = ts_get_current_session();
        sess->handle_svc = ldelf_handle_svc;

        res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
                                     usr_stack, uctx->dl_entry_func,
                                     is_arm32, &panicked, &panic_code);

        sess->handle_svc = sess->ctx->ops->handle_svc;
        ldelf_sess_cleanup(sess);

        if (panicked) {
                EMSG("ldelf dl_entry function panicked");
                abort_print_current_ts();
                res = TEE_ERROR_TARGET_DEAD;
        }
        if (!res) {
                res = arg->ret;
                if (!res)
                        *val = arg->dlsym.val;
        }

        return res;
}