// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, 2022 Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/user_mode_ctx.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

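/*
 * Handle to an open TA/SP binary: the TA store operations and store
 * handle backing it, the associated struct file, and how far into the
 * binary has been read so far.
 */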
struct bin_handle {
	const struct ts_store_ops *op;
	struct ts_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

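/*
 * Allocate zero-initialized TA memory and map it into the current user
 * mode context, with optional guard padding before and after the
 * region. LDELF_MAP_FLAG_SHAREABLE is the only accepted flag.
 */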
TEE_Result ldelf_syscall_map_zi(vaddr_t *va, size_t num_bytes, size_t pad_begin,
				size_t pad_end, unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct fobj *f = NULL;
	struct mobj *mobj = NULL;
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t vm_flags = 0;

	if (flags & ~LDELF_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (flags & LDELF_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(uctx, va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end, 0);
	mobj_put(mobj);

	return res;
}

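/*
 * Unmap a region from the current user mode context. Permanent
 * mappings cannot be removed.
 */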
TEE_Result ldelf_syscall_unmap(vaddr_t va, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;

	/*
	 * vm_get_flags() and vm_unmap() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this function
	 * is an API function it's worth having an extra guard here. If
	 * nothing else, it increases code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(uctx, va, sz);
}

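/*
 * Release a struct bin_handle: close the underlying TA store handle,
 * drop the file reference and free the handle itself. A NULL pointer
 * is ignored.
 */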
static void bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

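/*
 * Look up a TA or SP binary by UUID in the registered TA/SP stores and
 * return a handle to it in *handle. The per-session system context
 * holding the handle database is allocated on first use.
 */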
TEE_Result ldelf_syscall_open_bin(const TEE_UUID *uuid, size_t uuid_size,
				  uint32_t *handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	int h = 0;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)uuid, sizeof(TEE_UUID));
	if (res)
		return res;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)handle, sizeof(uint32_t));
	if (res)
		return res;

	if (uuid_size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sys_ctx) {
		sys_ctx = calloc(1, sizeof(*sys_ctx));
		if (!sys_ctx)
			return TEE_ERROR_OUT_OF_MEMORY;
		sess->user_ctx = sys_ctx;
	}

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (is_user_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, ta_stores,
					struct ts_store_ops) {
			DMSG("Lookup user TA ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else if (is_sp_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, sp_stores,
					struct ts_store_ops) {
			DMSG("Lookup user SP ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else {
		res = TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&sys_ctx->db, binh);
	if (h < 0)
		goto err_oom;
	*handle = h;

	return TEE_SUCCESS;

err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	bin_close(binh);
	return res;
}

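/*
 * Close a binary handle previously returned by ldelf_syscall_open_bin().
 * If the binary hasn't been read all the way to its end the remaining
 * bytes are first drained (read with a NULL buffer) so the store
 * processes the whole binary before the handle is closed. The system
 * context is freed when the last handle is closed.
 */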
TEE_Result ldelf_syscall_close_bin(unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL,
				     binh->size_bytes - binh->offs_bytes);

	bin_close(binh);
	if (handle_db_is_empty(&sys_ctx->db)) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}

	return res;
}

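/*
 * Copy num_bytes at offs_bytes in the binary to va. The store is read
 * sequentially: bytes before offs_bytes are skipped (read with a NULL
 * buffer), reads beyond the end of the binary are zero-filled, and
 * rewinding to an already consumed offset isn't possible.
 */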
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}

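/*
 * Map num_bytes of the binary at offs_bytes into the current user mode
 * context. Read-only ranges of the file are shared between contexts
 * via registered file slices, while writeable mappings get a private
 * copy. Shareable or executable mappings cannot also be writeable.
 */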
TEE_Result ldelf_syscall_map_bin(vaddr_t *va, size_t num_bytes,
				 unsigned long handle, size_t offs_bytes,
				 size_t pad_begin, size_t pad_end,
				 unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_pages = 0;
	size_t num_pages = 0;
	uint32_t prot = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_SHAREABLE |
				      LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_SHAREABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_EXECUTABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which is
		 * already holding the file lock) mapping lots of memory
		 * below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & LDELF_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f,
					    TEE_MATTR_MEM_TYPE_TAGGED);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file, TEE_MATTR_MEM_TYPE_TAGGED);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, *va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, *va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active, set it again to
		 * update the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, *va, num_rounded_bytes))
		panic();

	/*
	 * The context is currently active, set it again to update
	 * the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}

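/*
 * Copy num_bytes at offs in the binary to the user buffer dst, after
 * checking that the buffer is writeable by the caller.
 */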
TEE_Result ldelf_syscall_copy_from_bin(void *dst, size_t offs, size_t num_bytes,
				       unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)dst, num_bytes);
	if (res)
		return res;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, (vaddr_t)dst, offs, num_bytes);
}

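/*
 * Change the protection of an existing mapping. Permanent mappings are
 * rejected, and read-only file mappings cannot be made writeable.
 */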
TEE_Result ldelf_syscall_set_prot(unsigned long va, size_t num_bytes,
				  unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	/*
	 * vm_get_flags() and vm_set_prot() are supposed to detect or
	 * handle overflow directly or indirectly. However, since this
	 * function is an API function it's worth having an extra guard
	 * here. If nothing else, it increases code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(uctx, va, sz, prot);
}

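/*
 * Move an existing mapping to a new virtual address, with optional
 * guard padding. Permanent mappings cannot be moved.
 */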
TEE_Result ldelf_syscall_remap(unsigned long old_va, vaddr_t *new_va,
			       size_t num_bytes, size_t pad_begin,
			       size_t pad_end)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	uint32_t vm_flags = 0;

	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(uctx, new_va, old_va, num_bytes, pad_begin, pad_end);

	return res;
}

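/*
 * Fill the user buffer buf with num_bytes of random data from the
 * crypto RNG, after checking that the buffer is writeable by the
 * caller.
 */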
TEE_Result ldelf_syscall_gen_rnd_num(void *buf, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)buf, num_bytes);
	if (res)
		return res;

	return crypto_rng_read(buf, num_bytes);
}

/*
 * Should be called after returning from ldelf. If user_ctx is not NULL
 * it means that ldelf crashed or otherwise didn't complete properly.
 * This function will close the remaining handles and free the context
 * structs allocated by ldelf.
 */
void ldelf_sess_cleanup(struct ts_session *sess)
{
	struct system_ctx *sys_ctx = sess->user_ctx;

	if (sys_ctx) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}
}