// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}
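
/*
 * Illustrative sketch, not part of the upstream file: how a caller might
 * wrap a physically contiguous secure buffer and query a page-aligned
 * physical address from it. The base address and size are hypothetical
 * placeholders.
 *
 *	struct mobj *mobj = mobj_phys_alloc(0x42000000, 4 * SMALL_PAGE_SIZE,
 *					    TEE_MATTR_MEM_TYPE_CACHED,
 *					    CORE_MEM_SDP_MEM);
 *	if (mobj) {
 *		paddr_t pa = 0;
 *
 *		if (mobj_get_pa(mobj, 0, SMALL_PAGE_SIZE, &pa) == TEE_SUCCESS)
 *			DMSG("page-aligned PA %#" PRIxPA, pa);
 *		mobj_put(mobj);
 *	}
 */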

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };
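
/*
 * Illustrative note: mobj_virt is a pseudo-mobj covering the whole virtual
 * address space, so the "offset" passed to mobj_get_va() is returned as the
 * virtual address itself. A minimal sketch, with a hypothetical address:
 *
 *	vaddr_t va = 0x1000;
 *	void *p = mobj_get_va(&mobj_virt, va, sizeof(uint32_t));
 *
 *	assert(p == (void *)va);
 */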

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), len);
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				 size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	return mobj_get_mem_type(to_mobj_mm(mobj)->parent_mobj, mem_type);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __relrodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_mem_type = mobj_mm_get_mem_type,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			   tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
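
/*
 * Illustrative sketch (hypothetical size): carving a sub-range out of a
 * parent mobj through a tee_mm pool that covers the same physical range,
 * e.g. the secure DDR mobj and its pool registered in mobj_init() below.
 * Releasing the mobj also frees the tee_mm entry via the ->free() hook.
 *
 *	struct mobj *sub = mobj_mm_alloc(mobj_sec_ddr, 8 * SMALL_PAGE_SIZE,
 *					 &tee_mm_sec_ddr);
 *	if (sub) {
 *		void *va = mobj_get_va(sub, 0, 8 * SMALL_PAGE_SIZE);
 *
 *		if (va)
 *			memset(va, 0, 8 * SMALL_PAGE_SIZE);
 *		mobj_put(sub);
 *	}
 */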

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - its mobjs match both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}
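
/*
 * Illustrative sketch (hypothetical values): wrapping a buffer that the
 * normal world advertised at a physical address inside the static
 * non-secure SHM carveout, together with the cookie it used to identify it.
 * nsec_pa and cookie are placeholders supplied by the caller; the call
 * fails unless core_pbuf_is() confirms the range lies in CORE_MEM_NSEC_SHM.
 *
 *	struct mobj *shm = mobj_shm_alloc(nsec_pa, SMALL_PAGE_SIZE, cookie);
 *
 *	if (shm) {
 *		void *va = mobj_get_va(shm, 0, SMALL_PAGE_SIZE);
 *
 *		if (va)
 *			memset(va, 0, SMALL_PAGE_SIZE);
 *		mobj_put(shm);
 *	}
 */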

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (!mobj_check_offset_and_len(mobj, offs, len))
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				    enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __relrodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}
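
/*
 * Illustrative note (size hypothetical): the allocator above gives the
 * current user TA a pager-backed read/write region in its own address
 * space:
 *
 *	struct mobj *bounce = mobj_seccpy_shm_alloc(2 * SMALL_PAGE_SIZE);
 *	void *va = NULL;
 *
 *	if (bounce)
 *		va = mobj_get_va(bounce, 0, 2 * SMALL_PAGE_SIZE);
 *
 * The mapping is only reachable while the owning TA context is active;
 * mobj_put() unmaps the region and releases the backing fobj.
 */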

#endif /*CFG_PAGED_USER_TA*/

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}
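
/*
 * Illustrative sketch (f is a hypothetical, already allocated struct fobj):
 * wrapping a fobj so it can be mapped into a user mode context via the
 * vm_* API. The mobj takes its own references on the fobj and the optional
 * file, so the caller may drop its own reference once the call returns.
 *
 *	struct mobj *mobj = mobj_with_fobj_alloc(f, NULL,
 *						 TEE_MATTR_MEM_TYPE_CACHED);
 *
 *	fobj_put(f);
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */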

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);