1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct vm_paged_region_head core_vm_regions =
36 	TAILQ_HEAD_INITIALIZER(core_vm_regions);
37 
38 #define INVALID_PGIDX		UINT_MAX
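
/*
 * PMEM_FLAG_DIRTY: the physical page has been written to and its contents
 * must be saved back to the fobj before the page can be reused.
 * PMEM_FLAG_HIDDEN: the page is temporarily unmapped so that the next
 * access faults, telling the pager that the page is still in active use.
 */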
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @flags	flags defined by PMEM_FLAG_* above
 * @fobj_pgidx	index of the page in the @fobj
 * @fobj	File object whose page is currently made visible via this
 *		physical page.
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used when remapping the page, since the contents need to be
 *		updated before the page is available at the new location.
 */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
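/*
 * struct tblidx - Position of a page table entry: the translation table
 * (@pgt) and the entry index (@idx) within that table.
 */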
60 struct tblidx {
61 	struct pgt *pgt;
62 	unsigned int idx;
63 };
64 
65 /* The list of physical pages. The first page in the list is the oldest */
66 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
67 
68 static struct tee_pager_pmem_head tee_pager_pmem_head =
69 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
70 
71 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
72 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
73 
74 /* number of pages hidden */
75 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
76 
/* Number of registered physical pages, used when hiding pages. */
78 static size_t tee_pager_npages;
79 
80 /* This area covers the IVs for all fobjs with paged IVs */
81 static struct vm_paged_region *pager_iv_region;
/* Used by make_iv_available(); see that function for details. */
83 static struct tee_pager_pmem *pager_spare_pmem;
84 
85 #ifdef CFG_WITH_STATS
86 static struct tee_pager_stats pager_stats;
87 
static inline void incr_ro_hits(void)
89 {
90 	pager_stats.ro_hits++;
91 }
92 
static inline void incr_rw_hits(void)
94 {
95 	pager_stats.rw_hits++;
96 }
97 
static inline void incr_hidden_hits(void)
99 {
100 	pager_stats.hidden_hits++;
101 }
102 
static inline void incr_zi_released(void)
104 {
105 	pager_stats.zi_released++;
106 }
107 
static inline void incr_npages_all(void)
109 {
110 	pager_stats.npages_all++;
111 }
112 
static inline void set_npages(void)
114 {
115 	pager_stats.npages = tee_pager_npages;
116 }
117 
void tee_pager_get_stats(struct tee_pager_stats *stats)
119 {
120 	*stats = pager_stats;
121 
122 	pager_stats.hidden_hits = 0;
123 	pager_stats.ro_hits = 0;
124 	pager_stats.rw_hits = 0;
125 	pager_stats.zi_released = 0;
126 }
127 
128 #else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }
135 
void tee_pager_get_stats(struct tee_pager_stats *stats)
137 {
138 	memset(stats, 0, sizeof(struct tee_pager_stats));
139 }
140 #endif /* CFG_WITH_STATS */
141 
142 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
143 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
144 #define TBL_SHIFT	SMALL_PAGE_SHIFT
145 
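/*
 * Total virtual address range covered by the pager tables: the core (TEE
 * RAM) virtual address range extended to CORE_MMU_PGDIR_SIZE granularity
 * at both ends.
 */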
146 #define EFFECTIVE_VA_SIZE \
147 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
148 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
149 
150 static struct pager_table {
151 	struct pgt pgt;
152 	struct core_mmu_table_info tbl_info;
153 } *pager_tables;
154 static unsigned int num_pager_tables;
155 
156 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
157 
158 /* Defines the range of the alias area */
159 static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion.
 * @pager_alias_next_free gives the address of the next free entry, as long
 * as @pager_alias_next_free is non-zero.
 */
165 static uintptr_t pager_alias_next_free;
166 
167 #ifdef CFG_TEE_CORE_DEBUG
168 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
169 
static uint32_t pager_lock_dldetect(const char *func, const int line,
171 				    struct abort_info *ai)
172 {
173 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
174 	unsigned int retries = 0;
175 	unsigned int reminder = 0;
176 
177 	while (!cpu_spin_trylock(&pager_spinlock)) {
178 		retries++;
179 		if (!retries) {
180 			/* wrapped, time to report */
181 			trace_printf(func, line, TRACE_ERROR, true,
182 				     "possible spinlock deadlock reminder %u",
183 				     reminder);
184 			if (reminder < UINT_MAX)
185 				reminder++;
186 			if (ai)
187 				abort_print(ai);
188 		}
189 	}
190 
191 	return exceptions;
192 }
193 #else
static uint32_t pager_lock(struct abort_info __unused *ai)
195 {
196 	return cpu_spin_lock_xsave(&pager_spinlock);
197 }
198 #endif
199 
static uint32_t pager_lock_check_stack(size_t stack_size)
201 {
202 	if (stack_size) {
203 		int8_t buf[stack_size];
204 		size_t n;
205 
		/*
		 * Make sure to touch all pages of the stack that we expect
		 * to use with this lock held. We need to take any page
		 * faults now, before the lock is taken, or we'll deadlock
		 * the pager. The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
214 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
215 			io_write8((vaddr_t)buf + n, 1);
216 		io_write8((vaddr_t)buf + stack_size - 1, 1);
217 	}
218 
219 	return pager_lock(NULL);
220 }
221 
static void pager_unlock(uint32_t exceptions)
223 {
224 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
225 }
226 
void *tee_pager_phys_to_virt(paddr_t pa, size_t len)
228 {
229 	struct core_mmu_table_info ti;
230 	unsigned idx;
231 	uint32_t a;
232 	paddr_t p;
233 	vaddr_t v;
234 	size_t n;
235 
236 	if (pa & SMALL_PAGE_MASK || len > SMALL_PAGE_SIZE)
237 		return NULL;
238 
	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
242 	if (!tee_pager_get_table_info(pa, &ti))
243 		return NULL; /* impossible pa */
244 	idx = core_mmu_va2idx(&ti, pa);
245 	core_mmu_get_entry(&ti, idx, &p, &a);
246 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
247 		return (void *)core_mmu_idx2va(&ti, idx);
248 
249 	n = 0;
250 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
251 	while (true) {
252 		while (idx < TBL_NUM_ENTRIES) {
253 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
254 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
255 				return NULL;
256 
257 			core_mmu_get_entry(&pager_tables[n].tbl_info,
258 					   idx, &p, &a);
259 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
260 				return (void *)v;
261 			idx++;
262 		}
263 
264 		n++;
265 		if (n >= num_pager_tables)
266 			return NULL;
267 		idx = 0;
268 	}
269 
270 	return NULL;
271 }
272 
static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
274 {
275 	return pmem->flags & PMEM_FLAG_HIDDEN;
276 }
277 
static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
279 {
280 	return pmem->flags & PMEM_FLAG_DIRTY;
281 }
282 
static bool pmem_is_covered_by_region(struct tee_pager_pmem *pmem,
284 				      struct vm_paged_region *reg)
285 {
286 	if (pmem->fobj != reg->fobj)
287 		return false;
288 	if (pmem->fobj_pgidx < reg->fobj_pgoffs)
289 		return false;
290 	if ((pmem->fobj_pgidx - reg->fobj_pgoffs) >=
291 	    (reg->size >> SMALL_PAGE_SHIFT))
292 		return false;
293 
294 	return true;
295 }
296 
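/*
 * Number of translation tables (pgts) needed to cover [base, base + size).
 * For example, assuming a 2 MiB CORE_MMU_PGDIR_SIZE, base = 0x1ff000 with
 * size = 0x2000 spans two tables even though the size is far below the
 * reach of a single table.
 */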
static size_t get_pgt_count(vaddr_t base, size_t size)
298 {
299 	assert(size);
300 
301 	return (base + size - 1) / CORE_MMU_PGDIR_SIZE + 1 -
302 	       base / CORE_MMU_PGDIR_SIZE;
303 }
304 
static bool region_have_pgt(struct vm_paged_region *reg, struct pgt *pgt)
306 {
307 	size_t n = 0;
308 
309 	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
310 		if (reg->pgt_array[n] == pgt)
311 			return true;
312 
313 	return false;
314 }
315 
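/*
 * Translate a pmem's position within its fobj into a {pgt, idx} pair for
 * the given region, accounting for both the region's page offset into the
 * fobj (fobj_pgoffs) and the region's offset within its first translation
 * table (tbloffs).
 */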
static struct tblidx pmem_get_region_tblidx(struct tee_pager_pmem *pmem,
317 					    struct vm_paged_region *reg)
318 {
319 	size_t tbloffs = (reg->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
320 	size_t idx = pmem->fobj_pgidx - reg->fobj_pgoffs + tbloffs;
321 
322 	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
323 	assert(idx / TBL_NUM_ENTRIES < get_pgt_count(reg->base, reg->size));
324 
325 	return (struct tblidx){
326 		.idx = idx % TBL_NUM_ENTRIES,
327 		.pgt = reg->pgt_array[idx / TBL_NUM_ENTRIES],
328 	};
329 }
330 
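/*
 * Return the pager_table covering @va, or NULL if @va isn't covered by any
 * pager table.
 */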
static struct pager_table *find_pager_table_may_fail(vaddr_t va)
332 {
333 	size_t n;
334 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
335 
336 	if (!pager_tables)
337 		return NULL;
338 
339 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
340 	    CORE_MMU_PGDIR_SHIFT;
341 	if (n >= num_pager_tables)
342 		return NULL;
343 
344 	assert(va >= pager_tables[n].tbl_info.va_base &&
345 	       va <= (pager_tables[n].tbl_info.va_base | mask));
346 
347 	return pager_tables + n;
348 }
349 
static struct pager_table *find_pager_table(vaddr_t va)
351 {
352 	struct pager_table *pt = find_pager_table_may_fail(va);
353 
354 	assert(pt);
355 	return pt;
356 }
357 
bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
359 {
360 	struct pager_table *pt = find_pager_table_may_fail(va);
361 
362 	if (!pt)
363 		return false;
364 
365 	*ti = pt->tbl_info;
366 	return true;
367 }
368 
static struct core_mmu_table_info *find_table_info(vaddr_t va)
370 {
371 	return &find_pager_table(va)->tbl_info;
372 }
373 
static struct pgt *find_core_pgt(vaddr_t va)
375 {
376 	return &find_pager_table(va)->pgt;
377 }
378 
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
380 {
381 	struct pager_table *pt;
382 	unsigned idx;
383 	vaddr_t smem = tee_mm_get_smem(mm);
384 	size_t nbytes = tee_mm_get_bytes(mm);
385 	vaddr_t v;
386 	uint32_t a = 0;
387 
388 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
389 
390 	assert(!pager_alias_area);
391 	pager_alias_area = mm;
392 	pager_alias_next_free = smem;
393 
	/* Clear all mappings in the alias area */
395 	pt = find_pager_table(smem);
396 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
397 	while (pt <= (pager_tables + num_pager_tables - 1)) {
398 		while (idx < TBL_NUM_ENTRIES) {
399 			v = core_mmu_idx2va(&pt->tbl_info, idx);
400 			if (v >= (smem + nbytes))
401 				goto out;
402 
403 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
404 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
405 			if (a & TEE_MATTR_VALID_BLOCK)
406 				pgt_dec_used_entries(&pt->pgt);
407 			idx++;
408 		}
409 
410 		pt++;
411 		idx = 0;
412 	}
413 
414 out:
415 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
416 }
417 
static size_t tbl_usage_count(struct core_mmu_table_info *ti)
419 {
420 	size_t n;
421 	uint32_t a = 0;
422 	size_t usage = 0;
423 
424 	for (n = 0; n < ti->num_entries; n++) {
425 		core_mmu_get_entry(ti, n, NULL, &a);
426 		if (a & TEE_MATTR_VALID_BLOCK)
427 			usage++;
428 	}
429 	return usage;
430 }
431 
static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
433 {
434 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
435 	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
436 				     pa, attr);
437 }
438 
static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
440 {
441 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
442 	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
443 				     pa, attr);
444 }
445 
static struct tblidx region_va2tblidx(struct vm_paged_region *reg, vaddr_t va)
447 {
448 	paddr_t mask = CORE_MMU_PGDIR_MASK;
449 	size_t n = 0;
450 
451 	assert(va >= reg->base && va < (reg->base + reg->size));
452 	n = (va - (reg->base & ~mask)) / CORE_MMU_PGDIR_SIZE;
453 
454 	return (struct tblidx){
455 		.idx = (va & mask) / SMALL_PAGE_SIZE,
456 		.pgt = reg->pgt_array[n],
457 	};
458 }
459 
static vaddr_t tblidx2va(struct tblidx tblidx)
461 {
462 	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
463 }
464 
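/*
 * Invalidate the TLB entry for the page: by ASID for user mode mappings,
 * and for all ASIDs for core mappings.
 */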
static void tblidx_tlbi_entry(struct tblidx tblidx)
466 {
467 	vaddr_t va = tblidx2va(tblidx);
468 
469 #if defined(CFG_PAGED_USER_TA)
470 	if (tblidx.pgt->ctx) {
471 		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;
472 
473 		tlbi_mva_asid(va, asid);
474 		return;
475 	}
476 #endif
477 	tlbi_mva_allasid(va);
478 }
479 
static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
481 				  struct vm_paged_region *reg, vaddr_t va)
482 {
483 	struct tee_pager_pmem *p = NULL;
484 	unsigned int fobj_pgidx = 0;
485 
486 	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);
487 
488 	assert(va >= reg->base && va < (reg->base + reg->size));
489 	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;
490 
491 	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
492 		assert(p->fobj != reg->fobj || p->fobj_pgidx != fobj_pgidx);
493 
494 	pmem->fobj = reg->fobj;
495 	pmem->fobj_pgidx = fobj_pgidx;
496 }
497 
static void pmem_clear(struct tee_pager_pmem *pmem)
499 {
500 	pmem->fobj = NULL;
501 	pmem->fobj_pgidx = INVALID_PGIDX;
502 	pmem->flags = 0;
503 }
504 
static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
506 {
507 	struct vm_paged_region *reg = NULL;
508 	struct tblidx tblidx = { };
509 	uint32_t a = 0;
510 
511 	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link) {
512 		/*
513 		 * If only_this_pgt points to a pgt then the pgt of this
514 		 * region has to match or we'll skip over it.
515 		 */
516 		if (only_this_pgt && !region_have_pgt(reg, only_this_pgt))
517 			continue;
518 		if (!pmem_is_covered_by_region(pmem, reg))
519 			continue;
520 		tblidx = pmem_get_region_tblidx(pmem, reg);
521 		if (!tblidx.pgt)
522 			continue;
523 		tblidx_get_entry(tblidx, NULL, &a);
524 		if (a & TEE_MATTR_VALID_BLOCK) {
525 			tblidx_set_entry(tblidx, 0, 0);
526 			pgt_dec_used_entries(tblidx.pgt);
527 			tblidx_tlbi_entry(tblidx);
528 		}
529 	}
530 }
531 
void tee_pager_early_init(void)
533 {
534 	size_t n = 0;
535 
536 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
537 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
538 	if (!pager_tables)
539 		panic("Cannot allocate pager_tables");
540 
541 	/*
542 	 * Note that this depends on add_pager_vaspace() adding vaspace
543 	 * after end of memory.
544 	 */
545 	for (n = 0; n < num_pager_tables; n++) {
546 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
547 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
548 					 &pager_tables[n].tbl_info))
549 			panic("can't find mmu tables");
550 
551 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
552 			panic("Unsupported page size in translation table");
553 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
554 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
555 
556 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
557 		pager_tables[n].pgt.vabase = pager_tables[n].tbl_info.va_base;
558 		pgt_set_used_entries(&pager_tables[n].pgt,
559 				tbl_usage_count(&pager_tables[n].tbl_info));
560 	}
561 }
562 
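/*
 * Map the physical page @pa at the next free slot in the alias area and
 * return the aliased virtual address.
 */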
static void *pager_add_alias_page(paddr_t pa)
564 {
565 	unsigned idx;
566 	struct core_mmu_table_info *ti;
	/* Alias pages are mapped read-only; made writable at runtime when needed */
568 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
569 			TEE_MATTR_PR | (TEE_MATTR_MEM_TYPE_CACHED <<
570 					TEE_MATTR_MEM_TYPE_SHIFT);
571 
572 	DMSG("0x%" PRIxPA, pa);
573 
574 	ti = find_table_info(pager_alias_next_free);
575 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
576 	core_mmu_set_entry(ti, idx, pa, attr);
577 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
578 	pager_alias_next_free += SMALL_PAGE_SIZE;
579 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
580 				      tee_mm_get_bytes(pager_alias_area)))
581 		pager_alias_next_free = 0;
582 	return (void *)core_mmu_idx2va(ti, idx);
583 }
584 
static void region_insert(struct vm_paged_region_head *regions,
586 			  struct vm_paged_region *reg,
587 			  struct vm_paged_region *r_prev)
588 {
589 	uint32_t exceptions = pager_lock_check_stack(8);
590 
591 	if (r_prev)
592 		TAILQ_INSERT_AFTER(regions, r_prev, reg, link);
593 	else
594 		TAILQ_INSERT_HEAD(regions, reg, link);
595 	TAILQ_INSERT_TAIL(&reg->fobj->regions, reg, fobj_link);
596 
597 	pager_unlock(exceptions);
598 }
599 DECLARE_KEEP_PAGER(region_insert);
600 
static struct vm_paged_region *alloc_region(vaddr_t base, size_t size)
602 {
603 	struct vm_paged_region *reg = NULL;
604 
605 	if ((base & SMALL_PAGE_MASK) || !size) {
606 		EMSG("invalid pager region [%" PRIxVA " +0x%zx]", base, size);
607 		panic();
608 	}
609 
610 	reg = calloc(1, sizeof(*reg));
611 	if (!reg)
612 		return NULL;
613 	reg->pgt_array = calloc(get_pgt_count(base, size),
614 				sizeof(struct pgt *));
615 	if (!reg->pgt_array) {
616 		free(reg);
617 		return NULL;
618 	}
619 
620 	reg->base = base;
621 	reg->size = size;
622 	return reg;
623 }
624 
void tee_pager_add_core_region(vaddr_t base, enum vm_paged_region_type type,
626 			       struct fobj *fobj)
627 {
628 	struct vm_paged_region *reg = NULL;
629 	size_t n = 0;
630 
631 	assert(fobj);
632 
633 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d",
634 	     base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);
635 
636 	reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE);
637 	if (!reg)
638 		panic("alloc_region");
639 
640 	reg->fobj = fobj_get(fobj);
641 	reg->fobj_pgoffs = 0;
642 	reg->type = type;
643 
644 	switch (type) {
645 	case PAGED_REGION_TYPE_RO:
646 		reg->flags = TEE_MATTR_PRX;
647 		break;
648 	case PAGED_REGION_TYPE_RW:
649 	case PAGED_REGION_TYPE_LOCK:
650 		reg->flags = TEE_MATTR_PRW;
651 		break;
652 	default:
653 		panic();
654 	}
655 
656 	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
657 		reg->pgt_array[n] = find_core_pgt(base +
658 						  n * CORE_MMU_PGDIR_SIZE);
659 	region_insert(&core_vm_regions, reg, NULL);
660 }
661 
static struct vm_paged_region *find_region(struct vm_paged_region_head *regions,
663 					   vaddr_t va)
664 {
665 	struct vm_paged_region *reg;
666 
667 	if (!regions)
668 		return NULL;
669 
670 	TAILQ_FOREACH(reg, regions, link) {
671 		if (core_is_buffer_inside(va, 1, reg->base, reg->size))
672 			return reg;
673 	}
674 	return NULL;
675 }
676 
677 #ifdef CFG_PAGED_USER_TA
static struct vm_paged_region *find_uta_region(vaddr_t va)
679 {
680 	struct ts_ctx *ctx = thread_get_tsd()->ctx;
681 
682 	if (!is_user_mode_ctx(ctx))
683 		return NULL;
684 	return find_region(to_user_mode_ctx(ctx)->regions, va);
685 }
686 #else
static struct vm_paged_region *find_uta_region(vaddr_t va __unused)
688 {
689 	return NULL;
690 }
691 #endif /*CFG_PAGED_USER_TA*/
692 
693 
static uint32_t get_region_mattr(uint32_t reg_flags)
695 {
696 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
697 			TEE_MATTR_MEM_TYPE_CACHED << TEE_MATTR_MEM_TYPE_SHIFT |
698 			(reg_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
699 
700 	return attr;
701 }
702 
static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
704 {
705 	struct core_mmu_table_info *ti;
706 	paddr_t pa;
707 	unsigned idx;
708 
709 	ti = find_table_info((vaddr_t)pmem->va_alias);
710 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
711 	core_mmu_get_entry(ti, idx, &pa, NULL);
712 	return pa;
713 }
714 
715 #ifdef CFG_PAGED_USER_TA
static void unlink_region(struct vm_paged_region_head *regions,
717 			  struct vm_paged_region *reg)
718 {
719 	uint32_t exceptions = pager_lock_check_stack(64);
720 
721 	TAILQ_REMOVE(regions, reg, link);
722 	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);
723 
724 	pager_unlock(exceptions);
725 }
726 DECLARE_KEEP_PAGER(unlink_region);
727 
static void free_region(struct vm_paged_region *reg)
729 {
730 	fobj_put(reg->fobj);
731 	free(reg->pgt_array);
732 	free(reg);
733 }
734 
static TEE_Result pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
736 				      struct fobj *fobj, uint32_t prot)
737 {
738 	struct vm_paged_region *r_prev = NULL;
739 	struct vm_paged_region *reg = NULL;
740 	vaddr_t b = base;
741 	size_t fobj_pgoffs = 0;
742 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
743 
744 	if (!uctx->regions) {
745 		uctx->regions = malloc(sizeof(*uctx->regions));
746 		if (!uctx->regions)
747 			return TEE_ERROR_OUT_OF_MEMORY;
748 		TAILQ_INIT(uctx->regions);
749 	}
750 
751 	reg = TAILQ_FIRST(uctx->regions);
752 	while (reg) {
753 		if (core_is_buffer_intersect(b, s, reg->base, reg->size))
754 			return TEE_ERROR_BAD_PARAMETERS;
755 		if (b < reg->base)
756 			break;
757 		r_prev = reg;
758 		reg = TAILQ_NEXT(reg, link);
759 	}
760 
761 	reg = alloc_region(b, s);
762 	if (!reg)
763 		return TEE_ERROR_OUT_OF_MEMORY;
764 
765 	/* Table info will be set when the context is activated. */
766 	reg->fobj = fobj_get(fobj);
767 	reg->fobj_pgoffs = fobj_pgoffs;
768 	reg->type = PAGED_REGION_TYPE_RW;
769 	reg->flags = prot;
770 
771 	region_insert(uctx->regions, reg, r_prev);
772 
773 	return TEE_SUCCESS;
774 }
775 
static void map_pgts(struct vm_paged_region *reg)
777 {
778 	struct core_mmu_table_info dir_info = { NULL };
779 	size_t n = 0;
780 
781 	core_mmu_get_user_pgdir(&dir_info);
782 
783 	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
784 		struct pgt *pgt = reg->pgt_array[n];
785 		uint32_t attr = 0;
786 		paddr_t pa = 0;
787 		size_t idx = 0;
788 
789 		idx = core_mmu_va2idx(&dir_info, pgt->vabase);
790 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
791 
		/*
		 * Check if the page table is already in use; if it is,
		 * it's already registered.
		 */
796 		if (pgt->num_used_entries) {
797 			assert(attr & TEE_MATTR_TABLE);
798 			assert(pa == virt_to_phys(pgt->tbl));
799 			continue;
800 		}
801 
802 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
803 		pa = virt_to_phys(pgt->tbl);
804 		assert(pa);
805 		/*
806 		 * Note that the update of the table entry is guaranteed to
807 		 * be atomic.
808 		 */
809 		core_mmu_set_entry(&dir_info, idx, pa, attr);
810 	}
811 }
812 
TEE_Result tee_pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
814 				   struct fobj *fobj, uint32_t prot)
815 {
816 	TEE_Result res = TEE_SUCCESS;
817 	struct thread_specific_data *tsd = thread_get_tsd();
818 	struct vm_paged_region *reg = NULL;
819 
820 	res = pager_add_um_region(uctx, base, fobj, prot);
821 	if (res)
822 		return res;
823 
824 	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * We're changing the currently active user mode context.
		 * Assign page tables to the new regions and make sure that
		 * the page tables are registered in the upper table.
		 */
830 		tee_pager_assign_um_tables(uctx);
831 		TAILQ_FOREACH(reg, uctx->regions, link)
832 			map_pgts(reg);
833 	}
834 
835 	return TEE_SUCCESS;
836 }
837 
static void split_region(struct vm_paged_region *reg,
839 			 struct vm_paged_region *r2, vaddr_t va)
840 {
841 	uint32_t exceptions = pager_lock_check_stack(64);
842 	size_t diff = va - reg->base;
843 	size_t r2_pgt_count = 0;
844 	size_t reg_pgt_count = 0;
845 	size_t n0 = 0;
846 	size_t n = 0;
847 
848 	assert(r2->base == va);
849 	assert(r2->size == reg->size - diff);
850 
851 	r2->fobj = fobj_get(reg->fobj);
852 	r2->fobj_pgoffs = reg->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
853 	r2->type = reg->type;
854 	r2->flags = reg->flags;
855 
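	/*
	 * Hand the translation tables covering the upper part of the
	 * original region over to r2; the pgt at the split boundary may
	 * end up shared by both regions.
	 */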
856 	r2_pgt_count = get_pgt_count(r2->base, r2->size);
857 	reg_pgt_count = get_pgt_count(reg->base, reg->size);
858 	n0 = reg_pgt_count - r2_pgt_count;
859 	for (n = n0; n < reg_pgt_count; n++)
860 		r2->pgt_array[n - n0] = reg->pgt_array[n];
861 	reg->size = diff;
862 
863 	TAILQ_INSERT_BEFORE(reg, r2, link);
864 	TAILQ_INSERT_AFTER(&reg->fobj->regions, reg, r2, fobj_link);
865 
866 	pager_unlock(exceptions);
867 }
868 DECLARE_KEEP_PAGER(split_region);
869 
TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
871 {
872 	struct vm_paged_region *reg = NULL;
873 	struct vm_paged_region *r2 = NULL;
874 
875 	if (va & SMALL_PAGE_MASK)
876 		return TEE_ERROR_BAD_PARAMETERS;
877 
878 	TAILQ_FOREACH(reg, uctx->regions, link) {
879 		if (va == reg->base || va == reg->base + reg->size)
880 			return TEE_SUCCESS;
881 		if (va > reg->base && va < reg->base + reg->size) {
882 			size_t diff = va - reg->base;
883 
884 			r2 = alloc_region(va, reg->size - diff);
885 			if (!r2)
886 				return TEE_ERROR_OUT_OF_MEMORY;
887 			split_region(reg, r2, va);
888 			return TEE_SUCCESS;
889 		}
890 	}
891 
892 	return TEE_SUCCESS;
893 }
894 
895 static struct pgt **
merge_region_with_next(struct vm_paged_region_head *regions,
897 		       struct vm_paged_region *reg,
898 		       struct vm_paged_region *r_next, struct pgt **pgt_array)
899 {
900 	uint32_t exceptions = pager_lock_check_stack(64);
901 	struct pgt **old_pgt_array = reg->pgt_array;
902 
903 	reg->pgt_array = pgt_array;
904 	TAILQ_REMOVE(regions, r_next, link);
905 	TAILQ_REMOVE(&r_next->fobj->regions, r_next, fobj_link);
906 
907 	pager_unlock(exceptions);
908 	return old_pgt_array;
909 }
910 DECLARE_KEEP_PAGER(merge_region_with_next);
911 
static struct pgt **alloc_merged_pgt_array(struct vm_paged_region *a,
913 					   struct vm_paged_region *a_next)
914 {
915 	size_t a_next_pgt_count = get_pgt_count(a_next->base, a_next->size);
916 	size_t a_pgt_count = get_pgt_count(a->base, a->size);
917 	size_t pgt_count = get_pgt_count(a->base, a->size + a_next->size);
918 	struct pgt **pgt_array = NULL;
919 	bool have_shared_pgt = false;
920 
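	/*
	 * The regions are adjacent; they share a pgt when the end of @a
	 * and the start of @a_next fall within the same
	 * CORE_MMU_PGDIR_SIZE aligned block.
	 */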
921 	have_shared_pgt = ((a->base + a->size) & ~CORE_MMU_PGDIR_MASK) ==
922 			  (a_next->base & ~CORE_MMU_PGDIR_MASK);
923 
924 	if (have_shared_pgt)
925 		assert(pgt_count == a_pgt_count + a_next_pgt_count - 1);
926 	else
927 		assert(pgt_count == a_pgt_count + a_next_pgt_count);
928 
929 	/* In case there's a shared pgt they must match */
930 	if (have_shared_pgt &&
931 	    a->pgt_array[a_pgt_count - 1] != a_next->pgt_array[0])
932 		return NULL;
933 
934 	pgt_array = calloc(sizeof(struct pgt *), pgt_count);
935 	if (!pgt_array)
936 		return NULL;
937 
938 	/*
939 	 * Copy and merge the two pgt_arrays, note the special case
940 	 * where a pgt is shared.
941 	 */
942 	memcpy(pgt_array, a->pgt_array, a_pgt_count * sizeof(struct pgt *));
943 	if (have_shared_pgt)
944 		memcpy(pgt_array + a_pgt_count, a_next->pgt_array + 1,
945 		       (a_next_pgt_count - 1) * sizeof(struct pgt *));
946 	else
947 		memcpy(pgt_array + a_pgt_count, a_next->pgt_array,
948 		       a_next_pgt_count * sizeof(struct pgt *));
949 
950 	return pgt_array;
951 }
952 
void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
954 			       size_t len)
955 {
956 	struct vm_paged_region *r_next = NULL;
957 	struct vm_paged_region *reg = NULL;
958 	struct pgt **pgt_array = NULL;
959 	vaddr_t end_va = 0;
960 
961 	if ((va | len) & SMALL_PAGE_MASK)
962 		return;
963 	if (ADD_OVERFLOW(va, len, &end_va))
964 		return;
965 
966 	for (reg = TAILQ_FIRST(uctx->regions);; reg = r_next) {
967 		r_next = TAILQ_NEXT(reg, link);
968 		if (!r_next)
969 			return;
970 
971 		/* Try merging with the area just before va */
972 		if (reg->base + reg->size < va)
973 			continue;
974 
975 		/*
976 		 * If reg->base is well past our range we're done.
977 		 * Note that if it's just the page after our range we'll
978 		 * try to merge.
979 		 */
980 		if (reg->base > end_va)
981 			return;
982 
983 		if (reg->base + reg->size != r_next->base)
984 			continue;
985 		if (reg->fobj != r_next->fobj || reg->type != r_next->type ||
986 		    reg->flags != r_next->flags)
987 			continue;
988 		if (reg->fobj_pgoffs + reg->size / SMALL_PAGE_SIZE !=
989 		    r_next->fobj_pgoffs)
990 			continue;
991 
992 		pgt_array = alloc_merged_pgt_array(reg, r_next);
993 		if (!pgt_array)
994 			continue;
995 
996 		/*
997 		 * merge_region_with_next() returns the old pgt array which
998 		 * was replaced in reg. We don't want to call free()
999 		 * directly from merge_region_with_next() that would pull
1000 		 * free() and its dependencies into the unpaged area.
1001 		 */
1002 		free(merge_region_with_next(uctx->regions, reg, r_next,
1003 					    pgt_array));
1004 		free_region(r_next);
1005 		r_next = reg;
1006 	}
1007 }
1008 
static void rem_region(struct vm_paged_region_head *regions,
1010 		       struct vm_paged_region *reg)
1011 {
1012 	struct tee_pager_pmem *pmem;
1013 	size_t last_pgoffs = reg->fobj_pgoffs +
1014 			     (reg->size >> SMALL_PAGE_SHIFT) - 1;
1015 	uint32_t exceptions;
1016 	struct tblidx tblidx = { };
1017 	uint32_t a = 0;
1018 
1019 	exceptions = pager_lock_check_stack(64);
1020 
1021 	TAILQ_REMOVE(regions, reg, link);
1022 	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);
1023 
1024 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1025 		if (pmem->fobj != reg->fobj ||
1026 		    pmem->fobj_pgidx < reg->fobj_pgoffs ||
1027 		    pmem->fobj_pgidx > last_pgoffs)
1028 			continue;
1029 
1030 		tblidx = pmem_get_region_tblidx(pmem, reg);
1031 		tblidx_get_entry(tblidx, NULL, &a);
1032 		if (!(a & TEE_MATTR_VALID_BLOCK))
1033 			continue;
1034 
1035 		tblidx_set_entry(tblidx, 0, 0);
1036 		tblidx_tlbi_entry(tblidx);
1037 		pgt_dec_used_entries(tblidx.pgt);
1038 	}
1039 
1040 	pager_unlock(exceptions);
1041 }
1042 DECLARE_KEEP_PAGER(rem_region);
1043 
void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
1045 			     size_t size)
1046 {
1047 	struct vm_paged_region *reg;
1048 	struct vm_paged_region *r_next;
1049 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
1050 
1051 	TAILQ_FOREACH_SAFE(reg, uctx->regions, link, r_next) {
1052 		if (core_is_buffer_inside(reg->base, reg->size, base, s)) {
1053 			rem_region(uctx->regions, reg);
1054 			free_region(reg);
1055 		}
1056 	}
1057 	tlbi_asid(uctx->vm_info.asid);
1058 }
1059 
void tee_pager_rem_um_regions(struct user_mode_ctx *uctx)
1061 {
1062 	struct vm_paged_region *reg = NULL;
1063 
1064 	if (!uctx->regions)
1065 		return;
1066 
1067 	while (true) {
1068 		reg = TAILQ_FIRST(uctx->regions);
1069 		if (!reg)
1070 			break;
1071 		unlink_region(uctx->regions, reg);
1072 		free_region(reg);
1073 	}
1074 
1075 	free(uctx->regions);
1076 }
1077 
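/*
 * Return true if all regions mapping this pmem belong to the same context.
 */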
static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
1079 {
1080 	struct vm_paged_region *reg = TAILQ_FIRST(&pmem->fobj->regions);
1081 	void *ctx = reg->pgt_array[0]->ctx;
1082 
1083 	do {
1084 		reg = TAILQ_NEXT(reg, fobj_link);
1085 		if (!reg)
1086 			return true;
1087 	} while (reg->pgt_array[0]->ctx == ctx);
1088 
1089 	return false;
1090 }
1091 
bool tee_pager_set_um_region_attr(struct user_mode_ctx *uctx, vaddr_t base,
1093 				  size_t size, uint32_t flags)
1094 {
1095 	bool ret = false;
1096 	vaddr_t b = base;
1097 	size_t s = size;
1098 	size_t s2 = 0;
1099 	struct vm_paged_region *reg = find_region(uctx->regions, b);
1100 	uint32_t exceptions = 0;
1101 	struct tee_pager_pmem *pmem = NULL;
1102 	uint32_t a = 0;
1103 	uint32_t f = 0;
1104 	uint32_t mattr = 0;
1105 	uint32_t f2 = 0;
1106 	struct tblidx tblidx = { };
1107 
1108 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1109 	if (f & TEE_MATTR_UW)
1110 		f |= TEE_MATTR_PW;
1111 	mattr = get_region_mattr(f);
1112 
1113 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1114 
1115 	while (s) {
1116 		if (!reg) {
1117 			ret = false;
1118 			goto out;
1119 		}
1120 		s2 = MIN(reg->size, s);
1121 		b += s2;
1122 		s -= s2;
1123 
1124 		if (reg->flags == f)
1125 			goto next_region;
1126 
1127 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1128 			if (!pmem_is_covered_by_region(pmem, reg))
1129 				continue;
1130 
1131 			tblidx = pmem_get_region_tblidx(pmem, reg);
1132 			tblidx_get_entry(tblidx, NULL, &a);
1133 			if (a == f)
1134 				continue;
1135 			tblidx_set_entry(tblidx, 0, 0);
1136 			tblidx_tlbi_entry(tblidx);
1137 
1138 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1139 			if (pmem_is_dirty(pmem))
1140 				f2 = mattr;
1141 			else
1142 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1143 			tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
1144 			if (!(a & TEE_MATTR_VALID_BLOCK))
1145 				pgt_inc_used_entries(tblidx.pgt);
1146 			/*
1147 			 * Make sure the table update is visible before
1148 			 * continuing.
1149 			 */
1150 			dsb_ishst();
1151 
			/*
			 * There's a problem if this page is already shared:
			 * we would need to do an icache invalidation for
			 * each context in which it is shared. In practice
			 * this never happens.
			 */
1158 			if (flags & TEE_MATTR_UX) {
1159 				void *va = (void *)tblidx2va(tblidx);
1160 
1161 				/* Assert that the pmem isn't shared. */
1162 				assert(same_context(pmem));
1163 
1164 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1165 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1166 			}
1167 		}
1168 
1169 		reg->flags = f;
1170 next_region:
1171 		reg = TAILQ_NEXT(reg, link);
1172 	}
1173 
1174 	ret = true;
1175 out:
1176 	pager_unlock(exceptions);
1177 	return ret;
1178 }
1179 
1180 DECLARE_KEEP_PAGER(tee_pager_set_um_region_attr);
1181 #endif /*CFG_PAGED_USER_TA*/
1182 
void tee_pager_invalidate_fobj(struct fobj *fobj)
1184 {
1185 	struct tee_pager_pmem *pmem;
1186 	uint32_t exceptions;
1187 
1188 	exceptions = pager_lock_check_stack(64);
1189 
1190 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1191 		if (pmem->fobj == fobj)
1192 			pmem_clear(pmem);
1193 
1194 	pager_unlock(exceptions);
1195 }
1196 DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1197 
static struct tee_pager_pmem *pmem_find(struct vm_paged_region *reg, vaddr_t va)
1199 {
1200 	struct tee_pager_pmem *pmem = NULL;
1201 	size_t fobj_pgidx = 0;
1202 
1203 	assert(va >= reg->base && va < (reg->base + reg->size));
1204 	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;
1205 
1206 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1207 		if (pmem->fobj == reg->fobj && pmem->fobj_pgidx == fobj_pgidx)
1208 			return pmem;
1209 
1210 	return NULL;
1211 }
1212 
static bool tee_pager_unhide_page(struct vm_paged_region *reg, vaddr_t page_va)
1214 {
1215 	struct tblidx tblidx = region_va2tblidx(reg, page_va);
1216 	struct tee_pager_pmem *pmem = pmem_find(reg, page_va);
1217 	uint32_t a = get_region_mattr(reg->flags);
1218 	uint32_t attr = 0;
1219 	paddr_t pa = 0;
1220 
1221 	if (!pmem)
1222 		return false;
1223 
1224 	tblidx_get_entry(tblidx, NULL, &attr);
1225 	if (attr & TEE_MATTR_VALID_BLOCK)
1226 		return false;
1227 
1228 	/*
	 * The page is hidden, or not mapped yet. Unhide the page and
1230 	 * move it to the tail.
1231 	 *
1232 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1233 	 * for this address, so no TLB invalidation is required after setting
1234 	 * the new entry. A DSB is needed though, to make the write visible.
1235 	 *
1236 	 * For user executable pages it's more complicated. Those pages can
1237 	 * be shared between multiple TA mappings and thus populated by
1238 	 * another TA. The reference manual states that:
1239 	 *
1240 	 * "instruction cache maintenance is required only after writing
1241 	 * new data to a physical address that holds an instruction."
1242 	 *
1243 	 * So for hidden pages we would not need to invalidate i-cache, but
1244 	 * for newly populated pages we do. Since we don't know which we
1245 	 * have to assume the worst and always invalidate the i-cache. We
1246 	 * don't need to clean the d-cache though, since that has already
1247 	 * been done earlier.
1248 	 *
1249 	 * Additional bookkeeping to tell if the i-cache invalidation is
1250 	 * needed or not is left as a future optimization.
1251 	 */
1252 
1253 	/* If it's not a dirty block, then it should be read only. */
1254 	if (!pmem_is_dirty(pmem))
1255 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1256 
1257 	pa = get_pmem_pa(pmem);
1258 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1259 	if (reg->flags & TEE_MATTR_UX) {
1260 		void *va = (void *)tblidx2va(tblidx);
1261 
1262 		/* Set a temporary read-only mapping */
1263 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1264 		tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
1265 		dsb_ishst();
1266 
1267 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1268 
1269 		/* Set the final mapping */
1270 		tblidx_set_entry(tblidx, pa, a);
1271 		tblidx_tlbi_entry(tblidx);
1272 	} else {
1273 		tblidx_set_entry(tblidx, pa, a);
1274 		dsb_ishst();
1275 	}
1276 	pgt_inc_used_entries(tblidx.pgt);
1277 
1278 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1279 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1280 	incr_hidden_hits();
1281 	return true;
1282 }
1283 
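/*
 * Hide (unmap) the oldest pages so that the next access faults. Pages that
 * fault are unhidden and moved to the tail of the list, which approximates
 * LRU when pages are picked for eviction from the head.
 */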
static void tee_pager_hide_pages(void)
1285 {
1286 	struct tee_pager_pmem *pmem = NULL;
1287 	size_t n = 0;
1288 
1289 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1290 		if (n >= TEE_PAGER_NHIDE)
1291 			break;
1292 		n++;
1293 
1294 		/* we cannot hide pages when pmem->fobj is not defined. */
1295 		if (!pmem->fobj)
1296 			continue;
1297 
1298 		if (pmem_is_hidden(pmem))
1299 			continue;
1300 
1301 		pmem->flags |= PMEM_FLAG_HIDDEN;
1302 		pmem_unmap(pmem, NULL);
1303 	}
1304 }
1305 
1306 static unsigned int __maybe_unused
num_regions_with_pmem(struct tee_pager_pmem *pmem)
1308 {
1309 	struct vm_paged_region *reg = NULL;
1310 	unsigned int num_matches = 0;
1311 
1312 	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link)
1313 		if (pmem_is_covered_by_region(pmem, reg))
1314 			num_matches++;
1315 
1316 	return num_matches;
1317 }
1318 
/*
 * Find a mapped pmem, unmap it and move it back to the pageable pmem list.
 * Return false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_phys(struct vm_paged_region *reg,
1324 				       vaddr_t page_va)
1325 {
1326 	struct tee_pager_pmem *pmem = NULL;
1327 	struct tblidx tblidx = { };
1328 	size_t fobj_pgidx = 0;
1329 
1330 	assert(page_va >= reg->base && page_va < (reg->base + reg->size));
1331 	fobj_pgidx = (page_va - reg->base) / SMALL_PAGE_SIZE +
1332 		     reg->fobj_pgoffs;
1333 
1334 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1335 		if (pmem->fobj != reg->fobj || pmem->fobj_pgidx != fobj_pgidx)
1336 			continue;
1337 
1338 		/*
1339 		 * Locked pages may not be shared. We're asserting that the
1340 		 * number of regions using this pmem is one and only one as
1341 		 * we're about to unmap it.
1342 		 */
1343 		assert(num_regions_with_pmem(pmem) == 1);
1344 
1345 		tblidx = pmem_get_region_tblidx(pmem, reg);
1346 		tblidx_set_entry(tblidx, 0, 0);
1347 		pgt_dec_used_entries(tblidx.pgt);
1348 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1349 		pmem_clear(pmem);
1350 		tee_pager_npages++;
1351 		set_npages();
1352 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1353 		incr_zi_released();
1354 		return true;
1355 	}
1356 
1357 	return false;
1358 }
1359 
static void pager_deploy_page(struct tee_pager_pmem *pmem,
1361 			      struct vm_paged_region *reg, vaddr_t page_va,
1362 			      bool clean_user_cache, bool writable)
1363 {
1364 	struct tblidx tblidx = region_va2tblidx(reg, page_va);
1365 	uint32_t attr = get_region_mattr(reg->flags);
1366 	struct core_mmu_table_info *ti = NULL;
1367 	uint8_t *va_alias = pmem->va_alias;
1368 	paddr_t pa = get_pmem_pa(pmem);
1369 	unsigned int idx_alias = 0;
1370 	uint32_t attr_alias = 0;
1371 	paddr_t pa_alias = 0;
1372 
1373 	/* Ensure we are allowed to write to aliased virtual page */
1374 	ti = find_table_info((vaddr_t)va_alias);
1375 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
1376 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
1377 	if (!(attr_alias & TEE_MATTR_PW)) {
1378 		attr_alias |= TEE_MATTR_PW;
1379 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1380 		tlbi_mva_allasid((vaddr_t)va_alias);
1381 	}
1382 
1383 	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1384 	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
1385 		EMSG("PH 0x%" PRIxVA " failed", page_va);
1386 		panic();
1387 	}
1388 	switch (reg->type) {
1389 	case PAGED_REGION_TYPE_RO:
1390 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1391 		incr_ro_hits();
1392 		/* Forbid write to aliases for read-only (maybe exec) pages */
1393 		attr_alias &= ~TEE_MATTR_PW;
1394 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1395 		tlbi_mva_allasid((vaddr_t)va_alias);
1396 		break;
1397 	case PAGED_REGION_TYPE_RW:
1398 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1399 		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
1400 			pmem->flags |= PMEM_FLAG_DIRTY;
1401 		incr_rw_hits();
1402 		break;
1403 	case PAGED_REGION_TYPE_LOCK:
1404 		/* Move page to lock list */
1405 		if (tee_pager_npages <= 0)
1406 			panic("Running out of pages");
1407 		tee_pager_npages--;
1408 		set_npages();
1409 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1410 		break;
1411 	default:
1412 		panic();
1413 	}
1414 	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1415 
1416 	if (!writable)
1417 		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1418 
1419 	/*
1420 	 * We've updated the page using the aliased mapping and
1421 	 * some cache maintenance is now needed if it's an
1422 	 * executable page.
1423 	 *
1424 	 * Since the d-cache is a Physically-indexed,
1425 	 * physically-tagged (PIPT) cache we can clean either the
1426 	 * aliased address or the real virtual address. In this
1427 	 * case we choose the real virtual address.
1428 	 *
1429 	 * The i-cache can also be PIPT, but may be something else
1430 	 * too like VIPT. The current code requires the caches to
1431 	 * implement the IVIPT extension, that is:
1432 	 * "instruction cache maintenance is required only after
1433 	 * writing new data to a physical address that holds an
1434 	 * instruction."
1435 	 *
1436 	 * To portably invalidate the icache the page has to
1437 	 * be mapped at the final virtual address but not
1438 	 * executable.
1439 	 */
1440 	if (reg->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1441 		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1442 				TEE_MATTR_PW | TEE_MATTR_UW;
1443 		void *va = (void *)page_va;
1444 
1445 		/* Set a temporary read-only mapping */
1446 		tblidx_set_entry(tblidx, pa, attr & ~mask);
1447 		tblidx_tlbi_entry(tblidx);
1448 
1449 		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1450 		if (clean_user_cache)
1451 			icache_inv_user_range(va, SMALL_PAGE_SIZE);
1452 		else
1453 			icache_inv_range(va, SMALL_PAGE_SIZE);
1454 
1455 		/* Set the final mapping */
1456 		tblidx_set_entry(tblidx, pa, attr);
1457 		tblidx_tlbi_entry(tblidx);
1458 	} else {
1459 		tblidx_set_entry(tblidx, pa, attr);
1460 		/*
1461 		 * No need to flush TLB for this entry, it was
1462 		 * invalid. We should use a barrier though, to make
1463 		 * sure that the change is visible.
1464 		 */
1465 		dsb_ishst();
1466 	}
1467 	pgt_inc_used_entries(tblidx.pgt);
1468 
1469 	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1470 }
1471 
static void make_dirty_page(struct tee_pager_pmem *pmem,
1473 			    struct vm_paged_region *reg, struct tblidx tblidx,
1474 			    paddr_t pa)
1475 {
1476 	assert(reg->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1477 	assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1478 
1479 	FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
1480 	pmem->flags |= PMEM_FLAG_DIRTY;
1481 	tblidx_set_entry(tblidx, pa, get_region_mattr(reg->flags));
1482 	tblidx_tlbi_entry(tblidx);
1483 }
1484 
/*
 * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
 * the corresponding IV available.
 *
 * In case the page needs to be saved the IV must be writable; consequently
 * the page holding the IV is made dirty. If the page instead only is to be
 * verified it's enough that the page holding the IV is read-only, so it
 * doesn't have to be made dirty.
 *
 * This function depends on pager_spare_pmem pointing to a free pmem when
 * entered. In case the page holding the needed IV isn't mapped, this spare
 * pmem is used to map the page. If this function has used pager_spare_pmem
 * and assigned it to NULL it must be reassigned with a new free pmem
 * before this function can be called again.
 */
static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
1501 			      bool writable)
1502 {
1503 	struct vm_paged_region *reg = pager_iv_region;
1504 	struct tee_pager_pmem *pmem = NULL;
1505 	struct tblidx tblidx = { };
1506 	vaddr_t page_va = 0;
1507 	uint32_t attr = 0;
1508 	paddr_t pa = 0;
1509 
1510 	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
1511 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
1512 		assert(!page_va);
1513 		return;
1514 	}
1515 
1516 	assert(reg && reg->type == PAGED_REGION_TYPE_RW);
1517 	assert(pager_spare_pmem);
1518 	assert(core_is_buffer_inside(page_va, 1, reg->base, reg->size));
1519 
1520 	tblidx = region_va2tblidx(reg, page_va);
1521 	/*
1522 	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
1523 	 * still checking the attributes afterwards.
1524 	 */
1525 	tee_pager_unhide_page(reg, page_va);
1526 	tblidx_get_entry(tblidx, &pa, &attr);
1527 	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
1528 		/*
1529 		 * We're using the spare pmem to map the IV corresponding
1530 		 * to another page.
1531 		 */
1532 		pmem = pager_spare_pmem;
1533 		pager_spare_pmem = NULL;
1534 		pmem_assign_fobj_page(pmem, reg, page_va);
1535 
1536 		if (writable)
1537 			pmem->flags |= PMEM_FLAG_DIRTY;
1538 
1539 		pager_deploy_page(pmem, reg, page_va,
1540 				  false /*!clean_user_cache*/, writable);
1541 	} else if (writable && !(attr & TEE_MATTR_PW)) {
1542 		pmem = pmem_find(reg, page_va);
1543 		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
1544 		make_dirty_page(pmem, reg, tblidx, pa);
1545 	}
1546 }
1547 
static void pager_get_page(struct vm_paged_region *reg, struct abort_info *ai,
1549 			   bool clean_user_cache)
1550 {
1551 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1552 	struct tblidx tblidx = region_va2tblidx(reg, page_va);
1553 	struct tee_pager_pmem *pmem = NULL;
1554 	bool writable = false;
1555 	uint32_t attr = 0;
1556 
1557 	/*
1558 	 * Get a pmem to load code and data into, also make sure
1559 	 * the corresponding IV page is available.
1560 	 */
1561 	while (true) {
1562 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1563 		if (!pmem) {
1564 			EMSG("No pmem entries");
1565 			abort_print(ai);
1566 			panic();
1567 		}
1568 
1569 		if (pmem->fobj) {
1570 			pmem_unmap(pmem, NULL);
1571 			if (pmem_is_dirty(pmem)) {
1572 				uint8_t *va = pmem->va_alias;
1573 
1574 				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1575 						  true /*writable*/);
1576 				asan_tag_access(va, va + SMALL_PAGE_SIZE);
1577 				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
1578 						   pmem->va_alias))
1579 					panic("fobj_save_page");
1580 				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);
1581 
1582 				pmem_clear(pmem);
1583 
1584 				/*
1585 				 * If the spare pmem was used by
1586 				 * make_iv_available() we need to replace
1587 				 * it with the just freed pmem.
1588 				 *
1589 				 * See make_iv_available() for details.
1590 				 */
1591 				if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1592 				    !pager_spare_pmem) {
1593 					TAILQ_REMOVE(&tee_pager_pmem_head,
1594 						     pmem, link);
1595 					pager_spare_pmem = pmem;
1596 					pmem = NULL;
1597 				}
1598 
1599 				/*
1600 				 * Check if the needed virtual page was
1601 				 * made available as a side effect of the
1602 				 * call to make_iv_available() above. If so
1603 				 * we're done.
1604 				 */
1605 				tblidx_get_entry(tblidx, NULL, &attr);
1606 				if (attr & TEE_MATTR_VALID_BLOCK)
1607 					return;
1608 
1609 				/*
1610 				 * The freed pmem was used to replace the
1611 				 * consumed pager_spare_pmem above. Restart
1612 				 * to find another pmem.
1613 				 */
1614 				if (!pmem)
1615 					continue;
1616 			}
1617 		}
1618 
1619 		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1620 		pmem_clear(pmem);
1621 
1622 		pmem_assign_fobj_page(pmem, reg, page_va);
1623 		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1624 				  false /*!writable*/);
1625 		if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
1626 			break;
1627 
1628 		/*
1629 		 * The spare pmem was used by make_iv_available(). We need
1630 		 * to replace it with the just freed pmem. And get another
1631 		 * pmem.
1632 		 *
1633 		 * See make_iv_available() for details.
1634 		 */
1635 		pmem_clear(pmem);
1636 		pager_spare_pmem = pmem;
1637 	}
1638 
	/*
	 * PAGED_REGION_TYPE_LOCK pages are always writable while
	 * PAGED_REGION_TYPE_RO pages are never writable.
	 *
	 * Pages from PAGED_REGION_TYPE_RW start read-only to be
	 * able to tell when they are updated and should be tagged
	 * as dirty.
	 */
1647 	if (reg->type == PAGED_REGION_TYPE_LOCK ||
1648 	    (reg->type == PAGED_REGION_TYPE_RW && abort_is_write_fault(ai)))
1649 		writable = true;
1650 	else
1651 		writable = false;
1652 
1653 	pager_deploy_page(pmem, reg, page_va, clean_user_cache, writable);
1654 }
1655 
static bool pager_update_permissions(struct vm_paged_region *reg,
1657 				     struct abort_info *ai, bool *handled)
1658 {
1659 	struct tblidx tblidx = region_va2tblidx(reg, ai->va);
1660 	struct tee_pager_pmem *pmem = NULL;
1661 	uint32_t attr = 0;
1662 	paddr_t pa = 0;
1663 
1664 	*handled = false;
1665 
1666 	tblidx_get_entry(tblidx, &pa, &attr);
1667 
1668 	/* Not mapped */
1669 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1670 		return false;
1671 
1672 	/* Not readable, should not happen */
1673 	if (abort_is_user_exception(ai)) {
1674 		if (!(attr & TEE_MATTR_UR))
1675 			return true;
1676 	} else {
1677 		if (!(attr & TEE_MATTR_PR)) {
1678 			abort_print_error(ai);
1679 			panic();
1680 		}
1681 	}
1682 
1683 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1684 	case CORE_MMU_FAULT_TRANSLATION:
1685 	case CORE_MMU_FAULT_READ_PERMISSION:
1686 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1687 			/* Check attempting to execute from an NOX page */
1688 			if (abort_is_user_exception(ai)) {
1689 				if (!(attr & TEE_MATTR_UX))
1690 					return true;
1691 			} else {
1692 				if (!(attr & TEE_MATTR_PX)) {
1693 					abort_print_error(ai);
1694 					panic();
1695 				}
1696 			}
1697 		}
1698 		/* Since the page is mapped now it's OK */
1699 		break;
1700 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1701 		/* Check attempting to write to an RO page */
1702 		pmem = pmem_find(reg, ai->va);
1703 		if (!pmem)
1704 			panic();
1705 		if (abort_is_user_exception(ai)) {
1706 			if (!(reg->flags & TEE_MATTR_UW))
1707 				return true;
1708 			if (!(attr & TEE_MATTR_UW))
1709 				make_dirty_page(pmem, reg, tblidx, pa);
1710 		} else {
1711 			if (!(reg->flags & TEE_MATTR_PW)) {
1712 				abort_print_error(ai);
1713 				panic();
1714 			}
1715 			if (!(attr & TEE_MATTR_PW))
1716 				make_dirty_page(pmem, reg, tblidx, pa);
1717 		}
		/* Since the permissions have been updated now it's OK */
1719 		break;
1720 	default:
1721 		/* Some fault we can't deal with */
1722 		if (abort_is_user_exception(ai))
1723 			return true;
1724 		abort_print_error(ai);
1725 		panic();
1726 	}
1727 	*handled = true;
1728 	return true;
1729 }
1730 
1731 #ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
1733 {
1734 	static size_t num_faults;
1735 	static size_t min_npages = SIZE_MAX;
1736 	static size_t total_min_npages = SIZE_MAX;
1737 
1738 	num_faults++;
1739 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1740 		DMSG("nfaults %zu npages %zu (min %zu)",
1741 		     num_faults, tee_pager_npages, min_npages);
1742 		min_npages = tee_pager_npages; /* reset */
1743 	}
1744 	if (tee_pager_npages < min_npages)
1745 		min_npages = tee_pager_npages;
1746 	if (tee_pager_npages < total_min_npages)
1747 		total_min_npages = tee_pager_npages;
1748 }
1749 #else
stat_handle_fault(void)1750 static void stat_handle_fault(void)
1751 {
1752 }
1753 #endif
1754 
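/*
 * Entry point for the pager from abort handling. Returns true if the fault
 * described by @ai was resolved by the pager (the page was unhidden, the
 * page table permissions were updated, or the page was paged in), and false
 * if the fault can't be handled by the pager because the address doesn't
 * belong to a paged region or a user mode access violates the region
 * permissions.
 */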
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct vm_paged_region *reg;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;
	bool clean_user_cache = false;

#ifdef TEE_PAGER_DEBUG_PRINT
	if (!abort_is_user_exception(ai))
		abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock(ai);

	stat_handle_fault();

	/* Check if the access is valid */
	if (abort_is_user_exception(ai)) {
		reg = find_uta_region(ai->va);
		clean_user_cache = true;
	} else {
		reg = find_region(&core_vm_regions, ai->va);
		if (!reg) {
			reg = find_uta_region(ai->va);
			clean_user_cache = true;
		}
	}
	if (!reg || !reg->pgt_array[0]) {
		ret = false;
		goto out;
	}

	if (tee_pager_unhide_page(reg, page_va))
		goto out_success;

	/*
	 * The page wasn't hidden, but some other core may have
	 * updated the table entry before we got here, or we need
	 * to make a read-only page read-write (dirty).
	 */
	if (pager_update_permissions(reg, ai, &ret)) {
		/*
		 * Nothing more to do with the abort. The problem
		 * could already have been dealt with by another
		 * core, or, if ret is false, the TA will be panicked.
		 */
		goto out;
	}

	pager_get_page(reg, ai, clean_user_cache);

out_success:
	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

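/*
 * Supplies the pager with physical pages. The range starting at @vaddr
 * covering @npages small pages must already be mapped. With @unmap set the
 * pages are unmapped and handed over as free pageable pages; otherwise each
 * page stays mapped and is assigned to the core region covering it. All
 * TLBs are invalidated at the end since this is only done during
 * initialization.
 */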
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* Set up memory */
	for (n = 0; n < npages; n++) {
		struct core_mmu_table_info *ti = NULL;
		struct tee_pager_pmem *pmem = NULL;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		struct tblidx tblidx = { };
		unsigned int pgidx = 0;
		paddr_t pa = 0;
		uint32_t attr = 0;

		ti = find_table_info(va);
		pgidx = core_mmu_va2idx(ti, va);
		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently that's not
		 * a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = calloc(1, sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");
		pmem_clear(pmem);

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(find_core_pgt(va));
		} else {
			struct vm_paged_region *reg = NULL;

			/*
			 * The page is still mapped, let's assign the region
			 * and update the protection bits accordingly.
			 */
			reg = find_region(&core_vm_regions, va);
			assert(reg);
			pmem_assign_fobj_page(pmem, reg, va);
			tblidx = pmem_get_region_tblidx(pmem, reg);
			assert(tblidx.pgt == find_core_pgt(va));
			assert(pa == get_pmem_pa(pmem));
			tblidx_set_entry(tblidx, pa,
					 get_region_mattr(reg->flags));
		}

		if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
		    !pager_spare_pmem) {
			pager_spare_pmem = pmem;
		} else {
			tee_pager_npages++;
			incr_npages_all();
			set_npages();
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		}
	}

	/*
	 * As this is done during initialization, invalidate all TLBs once
	 * instead of targeting only the modified entries.
	 */
	tlbi_all();
}

#ifdef CFG_PAGED_USER_TA
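/* Finds the translation table in the list starting at @pgt covering @va */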
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

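/*
 * Populates the pgt_array of each paged region in @uctx with the matching
 * translation tables from the context's pgt cache. Regions that already
 * have a table assigned are only checked for consistency.
 */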
void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
{
	struct vm_paged_region *reg = NULL;
	struct pgt *pgt = NULL;
	size_t n = 0;

	if (!uctx->regions)
		return;

	pgt = SLIST_FIRST(&uctx->pgt_cache);
	TAILQ_FOREACH(reg, uctx->regions, link) {
		for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
			vaddr_t va = reg->base + CORE_MMU_PGDIR_SIZE * n;
			struct pgt *p __maybe_unused = find_pgt(pgt, va);

			if (!reg->pgt_array[n])
				reg->pgt_array[n] = p;
			else
				assert(reg->pgt_array[n] == p);
		}
	}
}

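/*
 * Releases the pager's references to the translation table @pgt: all paged
 * physical pages mapped through @pgt are unmapped and the table is removed
 * from the pgt_array of the owning context's paged regions.
 */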
void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem = NULL;
	struct vm_paged_region *reg = NULL;
	struct vm_paged_region_head *regions = NULL;
	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
	size_t n = 0;

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj)
			pmem_unmap(pmem, pgt);
	}
	assert(!pgt->num_used_entries);

out:
	regions = to_user_mode_ctx(pgt->ctx)->regions;
	if (regions) {
		TAILQ_FOREACH(reg, regions, link) {
			for (n = 0; n < get_pgt_count(reg->base, reg->size);
			     n++) {
				if (reg->pgt_array[n] == pgt) {
					reg->pgt_array[n] = NULL;
					break;
				}
			}
		}
	}

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

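/*
 * Hands the physical pages backing the virtual range [addr, addr + size)
 * back to the pager: each small page fully covered by the range is released
 * via tee_pager_release_one_phys() and, if any page was unmapped, the
 * corresponding TLB entries are invalidated. The range must lie within core
 * paged regions.
 */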
void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct vm_paged_region *reg;
	uint32_t exceptions;

	if (end <= begin)
		return;

	exceptions = pager_lock_check_stack(128);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
		reg = find_region(&core_vm_regions, va);
		if (!reg)
			panic();
		unmapped |= tee_pager_release_one_phys(reg, va);
	}

	if (unmapped)
		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_release_phys);

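/*
 * Allocates @size bytes (rounded up to whole small pages) of virtual space
 * from the tee_mm_vcore pool, backs it with a locked paged fobj and
 * registers it as a PAGED_REGION_TYPE_LOCK core region. Returns NULL on
 * failure or when @size is zero.
 */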
void *tee_pager_alloc(size_t size)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;
	size_t num_pages = 0;
	struct fobj *fobj = NULL;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	smem = (uint8_t *)tee_mm_get_smem(mm);
	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
	fobj = fobj_locked_paged_alloc(num_pages);
	if (!fobj) {
		tee_mm_free(mm);
		return NULL;
	}

	tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_LOCK, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);

	return smem;
}

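/*
 * Maps @fobj as a read/write core paged region, records it as the pager IV
 * region and returns the virtual base address of the region. Panics if the
 * virtual address space can't be allocated; must only be called once.
 */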
vaddr_t tee_pager_init_iv_region(struct fobj *fobj)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;

	assert(!pager_iv_region);

	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
	if (!mm)
		panic();

	smem = (uint8_t *)tee_mm_get_smem(mm);
	tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_RW, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);

	pager_iv_region = find_region(&core_vm_regions, (vaddr_t)smem);
	assert(pager_iv_region && pager_iv_region->fobj == fobj);

	return (vaddr_t)smem;
}