// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2021, Arm Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <util.h>

#ifdef CFG_PL310
#include <kernel/tee_l2cc_mutex.h>
#endif

#define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URW | \
					 TEE_MATTR_SECURE)
#define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
					 TEE_MATTR_SECURE)

#define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
					 TEE_MATTR_MEM_TYPE_SHIFT)

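/*
 * Pick a virtual address for @reg in the gap between @prev_reg and
 * @next_reg, honoring the requested padding and the alignment granule.
 * If @reg->va is non-zero that fixed address is used instead, provided it
 * fits in the gap. Returns the selected address, or 0 if the region cannot
 * be placed here.
 */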
static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
				  const struct vm_region *next_reg,
				  const struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t granul)
{
	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
			    VM_FLAG_SHAREABLE;
	vaddr_t begin_va = 0;
	vaddr_t end_va = 0;
	size_t pad = 0;

	/*
	 * Insert an unmapped entry to separate regions with differing
	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
	 * bits as they are never to be contiguous with another region.
	 */
	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif

	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
		return 0;

	if (reg->va) {
		if (reg->va < begin_va)
			return 0;
		begin_va = reg->va;
	}

	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((next_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif
	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
	    ADD_OVERFLOW(end_va, pad, &end_va) ||
	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
		return 0;

	if (end_va <= next_reg->va) {
		assert(!reg->va || reg->va == begin_va);
		return begin_va;
	}

	return 0;
}

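/*
 * Check that enough translation tables are available to map the regions
 * of @uctx. With CFG_PAGED_USER_TA the page tables are also fetched right
 * away when @uctx belongs to the currently active thread context, since
 * the pager will need them shortly.
 */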
static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
{
	struct thread_specific_data *tsd __maybe_unused;

	if (!pgt_check_avail(uctx)) {
		EMSG("Page tables are not available");
		return TEE_ERROR_OUT_OF_MEMORY;
	}

#ifdef CFG_PAGED_USER_TA
	tsd = thread_get_tsd();
	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * The supplied uctx is the currently active context,
		 * allocate the page tables too as the pager needs to use
		 * them soon.
		 */
		pgt_get_all(uctx);
	}
#endif

	return TEE_SUCCESS;
}

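/*
 * Tear down the mapping backing region @r: paged regions are handed to
 * the pager, while ordinary regions have their page table entries cleared
 * and the TLB invalidated for the ASID. Finally, translation tables that
 * are no longer used by any neighbouring region are flushed.
 */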
static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
	struct vm_region *r2 = NULL;

	if (mobj_is_paged(r->mobj)) {
		tee_pager_rem_um_region(uctx, r->va, r->size);
	} else {
		pgt_clear_range(uctx, r->va, r->va + r->size);
		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
				    uctx->vm_info.asid);
	}

	/*
	 * Figure out how much virtual memory on a CORE_MMU_PGDIR_SIZE
	 * granularity can be freed. Only completely unused
	 * CORE_MMU_PGDIR_SIZE ranges can be supplied to pgt_flush_range().
	 *
	 * Note that there is no margin for error here, flushing either too
	 * many or too few translation tables can be fatal.
	 */
	r2 = TAILQ_NEXT(r, link);
	if (r2)
		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));

	r2 = TAILQ_PREV(r, vm_region_head, link);
	if (r2)
		begin = MAX(begin,
			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));

	if (begin < last)
		pgt_flush_range(uctx, begin, last);
}

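/*
 * Write the physical range [@pa, @pa + @size) into the entries of table
 * @ti covering [@va, @va + @size), one entry per translation granule.
 */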
static void set_pa_range(struct core_mmu_table_info *ti, vaddr_t va,
			 paddr_t pa, size_t size, uint32_t attr)
{
	unsigned int end = core_mmu_va2idx(ti, va + size);
	unsigned int idx = core_mmu_va2idx(ti, va);

	while (idx < end) {
		core_mmu_set_entry(ti, idx, pa, attr);
		idx++;
		pa += BIT64(ti->shift);
	}
}

static void set_reg_in_table(struct core_mmu_table_info *ti,
			     struct vm_region *r)
{
	vaddr_t va = MAX(r->va, ti->va_base);
	vaddr_t end = MIN(r->va + r->size, ti->va_base + CORE_MMU_PGDIR_SIZE);
	size_t sz = MIN(end - va, mobj_get_phys_granule(r->mobj));
	size_t granule = BIT(ti->shift);
	size_t offset = 0;
	paddr_t pa = 0;

	while (va < end) {
		offset = va - r->va + r->offset;
		if (mobj_get_pa(r->mobj, offset, granule, &pa))
			panic("Failed to get PA");
		set_pa_range(ti, va, pa, sz, r->attr);
		va += sz;
	}
}

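/*
 * Populate the page table entries for the non-paged region @r. If the
 * page tables of @uctx are all allocated they are updated in place,
 * otherwise only the tables found in the pgt cache list are updated.
 */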
static void set_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	struct pgt *p = SLIST_FIRST(&uctx->pgt_cache);
	struct core_mmu_table_info ti = { };

	assert(!mobj_is_paged(r->mobj));

	core_mmu_set_info_table(&ti, CORE_MMU_PGDIR_LEVEL, 0, NULL);

	if (p) {
		/* All the pgts are already allocated, update in place */
		do {
			ti.va_base = p->vabase;
			ti.table = p->tbl;
			set_reg_in_table(&ti, r);
			p = SLIST_NEXT(p, link);
		} while (p);
	} else {
		/*
		 * We may have a few pgts in the cache list, update the
		 * ones found.
		 */
		for (ti.va_base = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
		     ti.va_base < r->va + r->size;
		     ti.va_base += CORE_MMU_PGDIR_SIZE) {
			p = pgt_pop_from_cache_list(ti.va_base, uctx->ts_ctx);
			if (!p)
				continue;
			ti.table = p->tbl;
			set_reg_in_table(&ti, r);
			pgt_push_to_cache_list(p);
		}
	}
}

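/*
 * Insert @reg into the sorted region list of @vmi. A free virtual address
 * range large enough for the region, the requested padding and the
 * alignment is searched for between the existing regions; the limits of
 * the user VA range are represented by two dummy boundary regions.
 */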
static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t align)
{
	struct vm_region dummy_first_reg = { };
	struct vm_region dummy_last_reg = { };
	struct vm_region *r = NULL;
	struct vm_region *prev_r = NULL;
	vaddr_t va_range_base = 0;
	size_t va_range_size = 0;
	size_t granul;
	vaddr_t va = 0;
	size_t offs_plus_size = 0;

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	dummy_first_reg.va = va_range_base;
	dummy_last_reg.va = va_range_base + va_range_size;

	/* Check alignment, it has to be at least SMALL_PAGE based */
	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
		return TEE_ERROR_ACCESS_CONFLICT;

	/* Check that the mobj is defined for the entire range */
	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
		return TEE_ERROR_BAD_PARAMETERS;
	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	granul = MAX(align, SMALL_PAGE_SIZE);
	if (!IS_POWER_OF_TWO(granul))
		return TEE_ERROR_BAD_PARAMETERS;

	prev_r = &dummy_first_reg;
	TAILQ_FOREACH(r, &vmi->regions, link) {
		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
					granul);
		if (va) {
			reg->va = va;
			TAILQ_INSERT_BEFORE(r, reg, link);
			return TEE_SUCCESS;
		}
		prev_r = r;
	}

	r = TAILQ_LAST(&vmi->regions, vm_region_head);
	if (!r)
		r = &dummy_first_reg;
	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
				granul);
	if (va) {
		reg->va = va;
		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_CONFLICT;
}

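/*
 * Map @mobj at offset @offs into the address space of @uctx with the
 * requested protection, flags, padding and alignment. On success *va
 * holds the selected virtual address. If the context is currently active
 * the hardware mapping is refreshed as well.
 */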
TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
		      uint32_t prot, uint32_t flags, struct mobj *mobj,
		      size_t offs, size_t pad_begin, size_t pad_end,
		      size_t align)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;
	uint32_t attr = 0;

	if (prot & ~TEE_MATTR_PROT_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (!mobj_is_paged(mobj)) {
		uint32_t mem_type = 0;

		res = mobj_get_mem_type(mobj, &mem_type);
		if (res)
			goto err_free_reg;
		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
	}
	attr |= TEE_MATTR_VALID_BLOCK;
	if (mobj_is_secure(mobj))
		attr |= TEE_MATTR_SECURE;

	reg->mobj = mobj_get(mobj);
	reg->offset = offs;
	reg->va = *va;
	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
	reg->attr = attr | prot;
	reg->flags = flags;

	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
	if (res)
		goto err_put_mobj;

	res = alloc_pgt(uctx);
	if (res)
		goto err_rem_reg;

	if (mobj_is_paged(mobj)) {
		struct fobj *fobj = mobj_get_fobj(mobj);

		if (!fobj) {
			res = TEE_ERROR_GENERIC;
			goto err_rem_reg;
		}

		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
		fobj_put(fobj);
		if (res)
			goto err_rem_reg;
	} else {
		set_um_region(uctx, reg);
	}

	/*
	 * If the context is currently active, set it again to update
	 * the mapping.
	 */
	if (thread_get_tsd()->ctx == uctx->ts_ctx)
		vm_set_ctx(uctx->ts_ctx);

	*va = reg->va;

	return TEE_SUCCESS;

err_rem_reg:
	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
err_put_mobj:
	mobj_put(reg->mobj);
err_free_reg:
	free(reg);
	return res;
}

static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &vm_info->regions, link)
		if (va >= r->va && va < r->va + r->size)
			return r;

	return NULL;
}

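/*
 * Check that [@va, @va + @len) is covered by an unbroken chain of regions
 * starting at @r0. The optional @cmp_regs callback can reject neighbouring
 * regions that must not be treated as one range, for instance because of
 * differing flags or attributes.
 */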
static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
				   size_t len,
				   bool (*cmp_regs)(const struct vm_region *r0,
						    const struct vm_region *r,
						    const struct vm_region *rn))
{
	struct vm_region *r = r0;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return false;

	while (true) {
		struct vm_region *r_next = TAILQ_NEXT(r, link);
		vaddr_t r_end_va = r->va + r->size;

		if (r_end_va >= end_va)
			return true;
		if (!r_next)
			return false;
		if (r_end_va != r_next->va)
			return false;
		if (cmp_regs && !cmp_regs(r0, r, r_next))
			return false;
		r = r_next;
	}
}

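/*
 * Split region @r at @va into two regions sharing the same mobj, flags
 * and attributes. @va must lie strictly inside @r. Paged regions are also
 * split in the pager before the new region is linked in.
 */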
static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
				  struct vm_region *r, vaddr_t va)
{
	struct vm_region *r2 = NULL;
	size_t diff = va - r->va;

	assert(diff && diff < r->size);

	r2 = calloc(1, sizeof(*r2));
	if (!r2)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (mobj_is_paged(r->mobj)) {
		TEE_Result res = tee_pager_split_um_region(uctx, va);

		if (res) {
			free(r2);
			return res;
		}
	}

	r2->mobj = mobj_get(r->mobj);
	r2->offset = r->offset + diff;
	r2->va = va;
	r2->size = r->size - diff;
	r2->attr = r->attr;
	r2->flags = r->flags;

	r->size = diff;

	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);

	return TEE_SUCCESS;
}

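/*
 * Make sure [@va, @va + @len) is covered by complete regions only, by
 * splitting regions at the range boundaries where needed. The range must
 * be contiguous according to @cmp_regs. On success *r0_ret points to the
 * first region of the range.
 */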
static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
				 size_t len,
				 bool (*cmp_regs)(const struct vm_region *r0,
						  const struct vm_region *r,
						  const struct vm_region *rn),
				 struct vm_region **r0_ret)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, len, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Find the first vm_region in range and check that the entire
	 * range is contiguous.
	 */
	r = find_vm_region(&uctx->vm_info, va);
	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * If needed, split regions so that va and len cover only complete
	 * regions.
	 */
	if (va != r->va) {
		res = split_vm_region(uctx, r, va);
		if (res)
			return res;
		r = TAILQ_NEXT(r, link);
	}

	*r0_ret = r;
	r = find_vm_region(&uctx->vm_info, va + len - 1);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;
	if (end_va != r->va + r->size) {
		res = split_vm_region(uctx, r, end_va);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

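/*
 * Merge adjacent regions in and around [@va, @va + @len) again, undoing
 * earlier splits. Only regions with identical mobj, flags and attributes
 * and with contiguous offsets are merged.
 */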
static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	struct vm_region *r_next = NULL;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	tee_pager_merge_um_region(uctx, va, len);

	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
		r_next = TAILQ_NEXT(r, link);
		if (!r_next)
			return;

		/* Try merging with the region just before va */
		if (r->va + r->size < va)
			continue;

		/*
		 * If r->va is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (r->va > end_va)
			return;

		if (r->va + r->size != r_next->va)
			continue;
		if (r->mobj != r_next->mobj ||
		    r->flags != r_next->flags ||
		    r->attr != r_next->attr)
			continue;
		if (r->offset + r->size != r_next->offset)
			continue;

		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
		r->size += r_next->size;
		mobj_put(r_next->mobj);
		free(r_next);
		r_next = r;
	}
}

static bool cmp_region_for_remap(const struct vm_region *r0,
				 const struct vm_region *r,
				 const struct vm_region *rn)
{
	/*
	 * All the essentials have to match for remap to make sense: the
	 * mobj/fobj, attr and flags must be the same, and the offsets
	 * must be contiguous.
	 *
	 * Note that vm_remap() depends on mobj/fobj to be the same.
	 */
	return r0->flags == r->flags && r0->attr == r->attr &&
	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
}

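/*
 * Move the mapping [@old_va, @old_va + @len) to a new virtual address.
 * The affected regions are unmapped, removed from the region list and
 * then reinserted starting at *new_va (or a nearby free address honoring
 * the requested padding). If reinsertion fails the original mapping is
 * restored.
 */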
TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
		    size_t len, size_t pad_begin, size_t pad_end)
{
	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	struct vm_region *r_last = NULL;
	struct vm_region *r_first = NULL;
	struct fobj *fobj = NULL;
	vaddr_t next_va = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
	if (res)
		return res;

	if (mobj_is_paged(r0->mobj)) {
		fobj = mobj_get_fobj(r0->mobj);
		if (!fobj)
			panic();
	}

	for (r = r0; r; r = r_next) {
		if (r->va + r->size > old_va + len)
			break;
		r_next = TAILQ_NEXT(r, link);
		rem_um_region(uctx, r);
		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
		TAILQ_INSERT_TAIL(&regs, r, link);
	}

	/*
	 * Synchronize change to translation tables. Even though the pager
	 * case unmaps immediately we may still free a translation table.
	 */
	vm_set_ctx(uctx->ts_ctx);

	r_first = TAILQ_FIRST(&regs);
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		if (r_last) {
			r->va = r_last->va + r_last->size;
			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
		} else {
			r->va = *new_va;
			res = umap_add_region(&uctx->vm_info, r, pad_begin,
					      pad_end + len - r->size, 0);
		}
		if (!res) {
			r_last = r;
			res = alloc_pgt(uctx);
		}
		if (!res) {
			if (!fobj)
				set_um_region(uctx, r);
			else
				res = tee_pager_add_um_region(uctx, r->va, fobj,
							      r->attr);
		}

		if (res) {
			/*
			 * Something went wrong, move all the recently added
			 * regions back to regs for later reinsertion at
			 * the original spot.
			 */
			struct vm_region *r_tmp = NULL;
			struct vm_region *r_stop = NULL;

			if (r != r_last) {
				/*
				 * umap_add_region() failed, move r back to
				 * regs before all the rest are moved back.
				 */
				TAILQ_INSERT_HEAD(&regs, r, link);
			}
			if (r_last)
				r_stop = TAILQ_NEXT(r_last, link);
			for (r = r_first; r != r_stop; r = r_next) {
				r_next = TAILQ_NEXT(r, link);
				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
				if (r_tmp)
					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
							   link);
				else
					TAILQ_INSERT_HEAD(&regs, r, link);
				r_tmp = r;
			}

			goto err_restore_map;
		}
	}

	fobj_put(fobj);

	vm_set_ctx(uctx->ts_ctx);
	*new_va = r_first->va;

	return TEE_SUCCESS;

err_restore_map:
	next_va = old_va;
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		r->va = next_va;
		next_va += r->size;
		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
			panic("Cannot restore mapping");
		if (alloc_pgt(uctx))
			panic("Cannot restore mapping");
		if (fobj) {
			if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
				panic("Cannot restore mapping");
		} else {
			set_um_region(uctx, r);
		}
	}
	fobj_put(fobj);
	vm_set_ctx(uctx->ts_ctx);

	return res;
}

static bool cmp_region_for_get_flags(const struct vm_region *r0,
				     const struct vm_region *r,
				     const struct vm_region *rn __unused)
{
	return r0->flags == r->flags;
}

TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
			uint32_t *flags)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
		return TEE_ERROR_BAD_PARAMETERS;

	*flags = r->flags;

	return TEE_SUCCESS;
}

static bool cmp_region_for_get_prot(const struct vm_region *r0,
				    const struct vm_region *r,
				    const struct vm_region *rn __unused)
{
	return (r0->attr & TEE_MATTR_PROT_MASK) ==
	       (r->attr & TEE_MATTR_PROT_MASK);
}

TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint16_t *prot)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
		return TEE_ERROR_BAD_PARAMETERS;

	*prot = r->attr & TEE_MATTR_PROT_MASK;

	return TEE_SUCCESS;
}

TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	bool was_writeable = false;
	bool need_sync = false;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (prot & ~TEE_MATTR_PROT_MASK || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, len, NULL, &r0);
	if (res)
		return res;

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
			was_writeable = true;

		r->attr &= ~TEE_MATTR_PROT_MASK;
		r->attr |= prot;

		if (!mobj_is_paged(r->mobj)) {
			need_sync = true;
			set_um_region(uctx, r);
			/*
			 * Normally when set_um_region() is called we
			 * change from no mapping to some mapping, but in
			 * this case we change the permissions on an
			 * already present mapping so some TLB invalidation
			 * is needed. We also depend on the dsb() performed
			 * as part of the TLB invalidation.
			 */
			tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
					    uctx->vm_info.asid);
		}
	}

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (mobj_is_paged(r->mobj)) {
			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
							  prot))
				panic();
		} else if (was_writeable) {
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
				       r->size);
		}
	}

	if (need_sync && was_writeable)
		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);

	merge_vm_range(uctx, va, len);

	return TEE_SUCCESS;
}

static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
{
	TAILQ_REMOVE(&vmi->regions, reg, link);
	mobj_put(reg->mobj);
	free(reg);
}

TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	size_t end_va = 0;
	size_t unmap_end_va = 0;
	size_t l = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!l || (va & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, l, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, l, NULL, &r);
	if (res)
		return res;

	while (true) {
		r_next = TAILQ_NEXT(r, link);
		unmap_end_va = r->va + r->size;
		rem_um_region(uctx, r);
		umap_remove_region(&uctx->vm_info, r);
		if (!r_next || unmap_end_va == end_va)
			break;
		r = r_next;
	}

	return TEE_SUCCESS;
}

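/*
 * Map the kernel provided user code and data (as reported by
 * thread_get_user_kcode() and thread_get_user_kdata()) as permanent
 * regions in the user address space.
 */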
static TEE_Result map_kinit(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	size_t offs = 0;
	vaddr_t va = 0;
	size_t sz = 0;
	uint32_t prot = 0;

	thread_get_user_kcode(&mobj, &offs, &va, &sz);
	if (sz) {
		prot = TEE_MATTR_PRX;
		if (IS_ENABLED(CFG_CORE_BTI))
			prot |= TEE_MATTR_GUARDED;
		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
			     mobj, offs);
		if (res)
			return res;
	}

	thread_get_user_kdata(&mobj, &offs, &va, &sz);
	if (sz)
		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
			      mobj, offs);

	return TEE_SUCCESS;
}

TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
{
	TEE_Result res;
	uint32_t asid = asid_alloc();

	if (!asid) {
		DMSG("Failed to allocate ASID");
		return TEE_ERROR_GENERIC;
	}

	memset(uctx, 0, sizeof(*uctx));
	TAILQ_INIT(&uctx->vm_info.regions);
	SLIST_INIT(&uctx->pgt_cache);
	uctx->vm_info.asid = asid;
	uctx->ts_ctx = ts_ctx;

	res = map_kinit(uctx);
	if (res)
		vm_info_final(uctx);
	return res;
}

void vm_clean_param(struct user_mode_ctx *uctx)
{
	struct vm_region *next_r;
	struct vm_region *r;

	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
		if (r->flags & VM_FLAG_EPHEMERAL) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
		}
	}
}

static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		assert(!(r->flags & VM_FLAG_EPHEMERAL));
}

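/*
 * Translate the memory parameter @mem into the user virtual address where
 * it has been mapped, by locating the ephemeral region backed by the same
 * mobj that covers the parameter's physical offset.
 */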
static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
				       struct param_mem *mem, void **user_va)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		vaddr_t va = 0;
		size_t phys_offs = 0;

		if (!(region->flags & VM_FLAG_EPHEMERAL))
			continue;
		if (mem->mobj != region->mobj)
			continue;

		phys_offs = mobj_get_phys_offs(mem->mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		phys_offs += mem->offs;
		if (phys_offs < region->offset)
			continue;
		if (phys_offs >= (region->offset + region->size))
			continue;
		va = region->va + phys_offs - region->offset;
		*user_va = (void *)va;
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

static int cmp_param_mem(const void *a0, const void *a1)
{
	const struct param_mem *m1 = a1;
	const struct param_mem *m0 = a0;
	int ret;

	/* Make sure that invalid param_mem are placed last in the array */
	if (!m0->mobj && !m1->mobj)
		return 0;
	if (!m0->mobj)
		return 1;
	if (!m1->mobj)
		return -1;

	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
	if (ret)
		return ret;

	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
	if (ret)
		return ret;

	ret = CMP_TRILEAN(m0->offs, m1->offs);
	if (ret)
		return ret;

	return CMP_TRILEAN(m0->size, m1->size);
}

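/*
 * Map the memref parameters in @param as ephemeral, shareable regions and
 * return the corresponding user virtual addresses in @param_va.
 * Overlapping and adjacent parameters referring to the same mobj are
 * merged before mapping so the same memory isn't mapped twice.
 */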
TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
			void *param_va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;
	size_t m;
	struct param_mem mem[TEE_NUM_PARAMS];

	memset(mem, 0, sizeof(mem));
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		size_t phys_offs;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		mem[n].mobj = param->u[n].mem.mobj;
		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
					CORE_MMU_USER_PARAM_SIZE);
		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
				      mem[n].offs + param->u[n].mem.size,
				      CORE_MMU_USER_PARAM_SIZE);
		/*
		 * For size 0 (raw pointer parameter), add a minimum size
		 * value to allow the address to be mapped.
		 */
		if (!mem[n].size)
			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
	}

	/*
	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
	 * mobj pointer value since those entries can't be merged either,
	 * finally by offset.
	 *
	 * This should result in a list where all mergeable entries are
	 * next to each other and unused/invalid entries are at the end.
	 */
	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);

	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
		if (mem[n].mobj == mem[m].mobj &&
		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
					      mem[n].offs, mem[n].size))) {
			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
			continue;
		}
		m++;
		if (n != m)
			mem[m] = mem[n];
	}
	/*
	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
	 * index of the last valid entry if the first entry is valid, else
	 * 0.
	 */
	if (mem[0].mobj)
		m++;

	check_param_map_empty(uctx);

	for (n = 0; n < m; n++) {
		vaddr_t va = 0;

		res = vm_map(uctx, &va, mem[n].size,
			     TEE_MATTR_PRW | TEE_MATTR_URW,
			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
			     mem[n].mobj, mem[n].offs);
		if (res)
			goto out;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!param->u[n].mem.mobj)
			continue;

		res = param_mem_to_user_va(uctx, &param->u[n].mem,
					   param_va + n);
		if (res != TEE_SUCCESS)
			goto out;
	}

	res = alloc_pgt(uctx);
out:
	if (res)
		vm_clean_param(uctx);

	return res;
}

TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
			vaddr_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;

	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	reg->mobj = mobj;
	reg->offset = 0;
	reg->va = 0;
	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
	reg->attr = TEE_MATTR_SECURE;

	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
	if (res) {
		free(reg);
		return res;
	}

	res = alloc_pgt(uctx);
	if (res)
		umap_remove_region(&uctx->vm_info, reg);
	else
		*va = reg->va;

	return res;
}

void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->mobj == mobj && r->va == va) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
			return;
		}
	}
}

void vm_info_final(struct user_mode_ctx *uctx)
{
	if (!uctx->vm_info.asid)
		return;

	pgt_flush(uctx);
	tee_pager_rem_um_regions(uctx);

	/* clear MMU entries to avoid clash when asid is reused */
	tlbi_asid(uctx->vm_info.asid);

	asid_free(uctx->vm_info.asid);
	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
		umap_remove_region(&uctx->vm_info,
				   TAILQ_FIRST(&uctx->vm_info.regions));
	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
}

/* return true only if buffer fits inside TA private memory */
bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
				 const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

/* return true only if buffer intersects TA private memory */
bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
				  const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
			       const void *va, size_t size,
			       struct mobj **mobj, size_t *offs)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (!r->mobj)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
			size_t poffs;

			poffs = mobj_get_phys_offs(r->mobj,
						   CORE_MMU_USER_PARAM_SIZE);
			*mobj = r->mobj;
			*offs = (vaddr_t)va - r->va + r->offset - poffs;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_BAD_PARAMETERS;
}

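/*
 * Look up the region covering user address @ua and return the
 * corresponding physical address and/or mapping attributes. Either @pa or
 * @attr may be NULL if that part of the lookup is not needed.
 */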
static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
					  void *ua, paddr_t *pa, uint32_t *attr)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
					   region->size))
			continue;

		if (pa) {
			TEE_Result res;
			paddr_t p;
			size_t offset;
			size_t granule;

			/*
			 * The mobj and the input user address may each
			 * include a specific offset-in-granule position.
			 * Drop both to get the target physical page base
			 * address, then apply only the user address
			 * offset-in-granule. The smallest mapping granule
			 * is the small page.
			 */
			granule = MAX(region->mobj->phys_granule,
				      (size_t)SMALL_PAGE_SIZE);
			assert(!granule || IS_POWER_OF_TWO(granule));

			offset = region->offset +
				 ROUNDDOWN((vaddr_t)ua - region->va, granule);

			res = mobj_get_pa(region->mobj, offset, granule, &p);
			if (res != TEE_SUCCESS)
				return res;

			*pa = p | ((vaddr_t)ua & (granule - 1));
		}
		if (attr)
			*attr = region->attr;

		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}

TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
{
	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
}

void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
{
	paddr_t p = 0;
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		size_t granule = 0;
		size_t size = 0;
		size_t ofs = 0;

		/* pa2va is expected only for memory tracked through mobj */
		if (!region->mobj)
			continue;

		/* Physically granulated memory objects must be scanned */
		granule = region->mobj->phys_granule;
		assert(!granule || IS_POWER_OF_TWO(granule));

		for (ofs = region->offset; ofs < region->size; ofs += size) {
			if (granule) {
				/* From current offset to buffer/granule end */
				size = granule - (ofs & (granule - 1));

				if (size > (region->size - ofs))
					size = region->size - ofs;
			} else {
				size = region->size;
			}

			if (mobj_get_pa(region->mobj, ofs, granule, &p))
				continue;

			if (core_is_buffer_inside(pa, pa_size, p, size)) {
				/* Remove region offset (mobj phys offset) */
				ofs -= region->offset;
				/* Get offset-in-granule */
				p = pa - p;

				return (void *)(region->va + ofs + (vaddr_t)p);
			}
		}
	}

	return NULL;
}

TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
				  uint32_t flags, uaddr_t uaddr, size_t len)
{
	uaddr_t a = 0;
	uaddr_t end_addr = 0;
	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
			       CORE_MMU_USER_PARAM_SIZE);

	if (ADD_OVERFLOW(uaddr, len, &end_addr))
		return TEE_ERROR_ACCESS_DENIED;

	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
	    (flags & TEE_MEMORY_ACCESS_SECURE))
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * Rely on the TA private memory test to check whether the address
	 * range is private to the TA or not.
	 */
	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
	    !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
		return TEE_ERROR_ACCESS_DENIED;

	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
		uint32_t attr;
		TEE_Result res;

		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
		if (res != TEE_SUCCESS)
			return res;

		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
		    (attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
		    !(attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
			return TEE_ERROR_ACCESS_DENIED;
		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
			return TEE_ERROR_ACCESS_DENIED;
	}

	return TEE_SUCCESS;
}

void vm_set_ctx(struct ts_ctx *ctx)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct user_mode_ctx *uctx = NULL;

	core_mmu_set_user_map(NULL);

	if (is_user_mode_ctx(tsd->ctx)) {
		/*
		 * We're coming from a user mode context so we must make
		 * the pgts available for reuse.
		 */
		uctx = to_user_mode_ctx(tsd->ctx);
		pgt_put_all(uctx);
	}

	if (is_user_mode_ctx(ctx)) {
		struct core_mmu_user_map map = { };

		uctx = to_user_mode_ctx(ctx);
		core_mmu_create_user_map(uctx, &map);
		core_mmu_set_user_map(&map);
		tee_pager_assign_um_tables(uctx);
	}
	tsd->ctx = ctx;
}

struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
			 uint16_t *prot, size_t *offs)
{
	struct vm_region *r = NULL;
	size_t r_offs = 0;

	if (!len || ((*len | va) & SMALL_PAGE_MASK))
		return NULL;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return NULL;

	r_offs = va - r->va;

	*len = MIN(r->size - r_offs, *len);
	*offs = r->offset + r_offs;
	*prot = r->attr & TEE_MATTR_PROT_MASK;
	return mobj_get(r->mobj);
}