// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, 2022 Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

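/*
 * Tracks one binary opened with ldelf_syscall_open_bin(): the backing
 * TS store (@op, @h), the tag-indexed file object (@f), and a forward
 * read cursor (@offs_bytes) within the binary of @size_bytes.
 */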
struct bin_handle {
	const struct ts_store_ops *op;
	struct ts_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

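/*
 * Error-path helper: a failed vm_unmap() here would leave the user
 * address space in an unknown state, so panic instead of returning.
 */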
static void unmap_or_panic(struct user_mode_ctx *uctx, vaddr_t va,
			   size_t byte_count)
{
	TEE_Result res = vm_unmap(uctx, va, byte_count);

	if (res) {
		EMSG("vm_unmap(%#"PRIxVA", %#zx) returned %#"PRIx32,
		     va, byte_count, res);
		panic("Can't restore memory map");
	}
}

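/*
 * Maps zero-initialized memory into the current user mode context. *va
 * is read as an address hint and updated with the address chosen by
 * vm_map_pad(). Only LDELF_MAP_FLAG_SHAREABLE is accepted in @flags.
 */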
TEE_Result ldelf_syscall_map_zi(vaddr_t *va, size_t num_bytes, size_t pad_begin,
				size_t pad_end, unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct fobj *f = NULL;
	struct mobj *mobj = NULL;
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t vm_flags = 0;
	vaddr_t va_copy = 0;

	if (flags & ~LDELF_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	res = GET_USER_SCALAR(va_copy, va);
	if (res)
		return res;

	if (flags & LDELF_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(uctx, &va_copy, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end, 0);
	mobj_put(mobj);
	if (!res) {
		res = PUT_USER_SCALAR(va_copy, va);
		if (res)
			unmap_or_panic(uctx, va_copy, num_bytes);
	}

	return res;
}

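/*
 * Unmaps a region previously mapped for the current user mode context.
 * Permanent mappings (VM_FLAG_PERMANENT) are refused.
 */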
TEE_Result ldelf_syscall_unmap(vaddr_t va, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;

	/*
	 * vm_get_flags() and vm_unmap() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this function is
	 * an API function it's worth having an extra guard here, if
	 * nothing else to increase code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(uctx, va, sz);
}

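/*
 * Releases a struct bin_handle: closes the TS store handle, drops the
 * file reference and frees the handle itself. Accepts NULL.
 */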
static void bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

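/*
 * Opens the binary identified by @uuid and returns a handle to it in
 * @handle. Which scattered-array of TS stores is searched depends on
 * the kind of context: ta_stores for user TAs and StMM, sp_stores for
 * secure partitions. A system context holding the handle database is
 * allocated on first use.
 */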
TEE_Result ldelf_syscall_open_bin(const TEE_UUID *uuid, size_t uuid_size,
				  uint32_t *handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	TEE_UUID *bb_uuid = NULL;
	int h = 0;

	res = BB_MEMDUP_USER(uuid, sizeof(*uuid), &bb_uuid);
	if (res)
		return res;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)handle, sizeof(uint32_t));
	if (res)
		return res;

	if (uuid_size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sys_ctx) {
		sys_ctx = calloc(1, sizeof(*sys_ctx));
		if (!sys_ctx)
			return TEE_ERROR_OUT_OF_MEMORY;
		sess->user_ctx = sys_ctx;
	}

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (is_user_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, ta_stores,
					struct ts_store_ops) {
			DMSG("Lookup user TA ELF %pUl (%s)",
			     (void *)bb_uuid, binh->op->description);

			res = binh->op->open(bb_uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else if (is_sp_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, sp_stores,
					struct ts_store_ops) {
			DMSG("Lookup user SP ELF %pUl (%s)",
			     (void *)bb_uuid, binh->op->description);

			res = binh->op->open(bb_uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else {
		res = TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&sys_ctx->db, binh);
	if (h < 0)
		goto err_oom;
	res = PUT_USER_SCALAR(h, handle);
	if (res) {
		handle_put(&sys_ctx->db, h);
		goto err;
	}

	return TEE_SUCCESS;

err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	bin_close(binh);
	return res;
}

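/*
 * Closes a binary handle returned by ldelf_syscall_open_bin(). Any
 * unread tail of the binary is consumed first (read with NULL
 * destinations), which lets stores that process the binary as a stream
 * (e.g. while verifying it) reach the end. The system context is freed
 * once the last handle is closed.
 */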
TEE_Result ldelf_syscall_close_bin(unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL, NULL,
				     binh->size_bytes - binh->offs_bytes);

	bin_close(binh);
	if (handle_db_is_empty(&sys_ctx->db)) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}

	return res;
}

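/*
 * Copies @num_bytes at @offs_bytes from the binary into a kernel buffer
 * (@va_core), a user buffer (@va_user), or both. Reads are strictly
 * forward: an offset below the current read cursor is an error and any
 * gap up to @offs_bytes is skipped with a NULL-destination read. A read
 * extending past the end of the binary is zero-filled.
 */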
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va_core,
			       vaddr_t va_user, size_t offs_bytes,
			       size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va_core,
				     (void *)va_user, rb);
		if (res)
			return res;
		if (va_core)
			memset((uint8_t *)va_core + rb, 0, num_bytes - rb);
		if (va_user) {
			res = clear_user((uint8_t *)va_user + rb,
					 num_bytes - rb);
			if (res)
				return res;
		}
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va_core,
				     (void *)va_user, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}

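/*
 * Maps @num_bytes of the binary @handle at file offset @offs_bytes into
 * the user address space. Read-only mappings are registered as file
 * slices so they can be shared between contexts; shareable and
 * writeable, or executable and writeable, are mutually exclusive.
 */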
TEE_Result ldelf_syscall_map_bin(vaddr_t *va, size_t num_bytes,
				 unsigned long handle, size_t offs_bytes,
				 size_t pad_begin, size_t pad_end,
				 unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_pages = 0;
	size_t num_pages = 0;
	vaddr_t va_copy = 0;
	uint32_t prot = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_SHAREABLE |
				      LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	res = GET_USER_SCALAR(va_copy, va);
	if (res)
		return res;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_SHAREABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_EXECUTABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & LDELF_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f,
					    TEE_MATTR_MEM_TYPE_TAGGED);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, &va_copy, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
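		/*
		 * No registered slice: allocate fresh pages, map them
		 * kernel-RW only, copy the binary contents in place and
		 * only then apply the requested user protections.
		 */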
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file, TEE_MATTR_MEM_TYPE_TAGGED);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, &va_copy, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va_copy, 0, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, va_copy, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active, set it again to
		 * update the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	res = PUT_USER_SCALAR(va_copy, va);
	if (res)
		goto err_unmap_va;

	file_unlock(binh->f);

	return TEE_SUCCESS;

err_unmap_va:
	unmap_or_panic(uctx, va_copy, num_rounded_bytes);

	/*
	 * The context is currently active, set it again to
	 * update the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}

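/*
 * Copies @num_bytes at offset @offs from the binary @handle into the
 * user buffer @dst, after checking that @dst is writeable by the
 * current context. Subject to the forward-only read rule of
 * binh_copy_to().
 */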
TEE_Result ldelf_syscall_copy_from_bin(void *dst, size_t offs, size_t num_bytes,
				       unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)dst, num_bytes);
	if (res)
		return res;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, 0, (vaddr_t)dst, offs, num_bytes);
}

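/*
 * Changes the protection of an existing mapping. Permanent mappings are
 * refused, and read-only file mappings (VM_FLAG_READONLY) cannot be
 * made writeable.
 */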
TEE_Result ldelf_syscall_set_prot(unsigned long va, size_t num_bytes,
				  unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	/*
	 * vm_get_flags() and vm_set_prot() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this function is
	 * an API function it's worth having an extra guard here, if
	 * nothing else to increase code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(uctx, va, sz, prot);
}

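/*
 * Moves an existing mapping to a new address. *new_va is read as an
 * address hint and updated with the address chosen by vm_remap(). If
 * the new address cannot be written back to ldelf the remap is undone;
 * failing that, we panic since the memory map can't be restored.
 */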
TEE_Result ldelf_syscall_remap(unsigned long old_va, vaddr_t *new_va,
			       size_t num_bytes, size_t pad_begin,
			       size_t pad_end)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	uint32_t vm_flags = 0;
	vaddr_t va_copy = 0;

	res = GET_USER_SCALAR(va_copy, new_va);
	if (res)
		return res;
	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(uctx, &va_copy, old_va, num_bytes, pad_begin, pad_end);
	if (res)
		return res;

	res = PUT_USER_SCALAR(va_copy, new_va);
	if (res) {
		TEE_Result res2 = TEE_SUCCESS;
		vaddr_t va = old_va;

		res2 = vm_remap(uctx, &va, va_copy, num_bytes, 0, 0);
		if (res2) {
			EMSG("vm_remap(%#"PRIxVA", %#"PRIxVA", %#zx) returned %#"PRIx32,
			     va, va_copy, num_bytes, res2);
			panic("Can't restore memory map");
		}
		return res;
	}

	return TEE_SUCCESS;
}

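/*
 * Fills the user buffer @buf with @num_bytes of random data. The data
 * is staged in a bounce buffer so crypto_rng_read() never writes
 * directly to user memory; the bounce buffer is reclaimed when the
 * syscall returns.
 */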
TEE_Result ldelf_syscall_gen_rnd_num(void *buf, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	void *bb = NULL;

	bb = bb_alloc(num_bytes);
	if (!bb)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = crypto_rng_read(bb, num_bytes);
	if (res)
		return res;

	return copy_to_user(buf, bb, num_bytes);
}

/*
 * Should be called after returning from ldelf. If user_ctx is not NULL it
 * means that ldelf crashed or otherwise didn't complete properly. This
 * function will close the remaining handles and free the context structs
 * allocated by ldelf.
 */
void ldelf_sess_cleanup(struct ts_session *sess)
{
	struct system_ctx *sys_ctx = sess->user_ctx;

	if (sys_ctx) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}
}