// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2022, Linaro Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS		256

struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN	16

struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

/*
 * Note that this struct is padded to a size which is a power of 2; this
 * guarantees that the state will not span two pages, which avoids a
 * corner case in the pager when making the state available.
 */
struct rwp_state_padded {
	struct rwp_state state;
	uint64_t pad;
};
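/*
 * With the current field layout struct rwp_state is 24 bytes (an 8-byte IV
 * plus a 16-byte tag), so the extra uint64_t pads the struct to 32 bytes,
 * a power of two. The COMPILE_TIME_ASSERT() in rwp_paged_iv_alloc() checks
 * that this invariant holds.
 */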

struct fobj_rwp_unpaged_iv {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

struct fobj_rwp_paged_iv {
	size_t idx;
	struct fobj fobj;
};
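/*
 * Two flavours of read/write paged fobjs are implemented below. The
 * "unpaged IV" flavour keeps its backing store pointer and its array of
 * struct rwp_state in unpaged (heap) memory. The "paged IV" flavour, used
 * with CFG_CORE_PAGE_TAG_AND_IV, only records an index into the global
 * store and state arrays set up in rwp_init(); the state array itself
 * lives in a paged region. See fobj_rw_paged_alloc() for how the flavour
 * is selected.
 */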

const struct fobj_ops ops_rwp_paged_iv;
const struct fobj_ops ops_rwp_unpaged_iv;

static struct internal_aes_gcm_key rwp_ae_key;

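/*
 * Base addresses used with CFG_CORE_PAGE_TAG_AND_IV: rwp_state_base points
 * at the array with one struct rwp_state_padded per page in the
 * tee_mm_sec_ddr pool, and rwp_store_base at the start of the encrypted
 * backing store. Both are initialized in rwp_init().
 */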
static struct rwp_state_padded *rwp_state_base;
static uint8_t *rwp_store_base;

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
		      unsigned int num_pages)
{
	fobj->ops = ops;
	fobj->num_pages = num_pages;
	refcount_set(&fobj->refc, 1);
	TAILQ_INIT(&fobj->regions);
}

static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
	assert(TAILQ_EMPTY(&fobj->regions));
	tee_pager_invalidate_fobj(fobj);
}

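/*
 * rwp_load_page() and rwp_save_page() below protect the paged-out content
 * with AES-GCM using rwp_ae_key. The IV is derived from the virtual
 * address of the per-page state together with a counter that is increased
 * on every save, and the authentication tag is kept in struct rwp_state.
 */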
static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
				const uint8_t *src)
{
	struct rwp_aes_gcm_iv iv = {
		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
	};

	if (!state->iv) {
		/*
		 * IV is still zero, which means that this is a previously
		 * unused page.
		 */
		memset(va, 0, SMALL_PAGE_SIZE);
		return TEE_SUCCESS;
	}

	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, src, SMALL_PAGE_SIZE, va,
				    state->tag, sizeof(state->tag));
}

static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
				uint8_t *dst)
{
	size_t tag_len = sizeof(state->tag);
	struct rwp_aes_gcm_iv iv = { };

	assert(state->iv + 1 > state->iv);

	state->iv++;

	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
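	/*
	 * The resulting 96-bit IV is laid out as a fixed field followed by
	 * an invocation counter: iv[0] holds the (truncated) virtual
	 * address of this page's state, while iv[1] and iv[2] hold the
	 * high and low 32 bits of the monotonically increasing counter.
	 */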
	iv.iv[0] = (vaddr_t)state;
	iv.iv[1] = state->iv >> 32;
	iv.iv[2] = state->iv;

	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
				    state->tag, &tag_len);
}

static struct rwp_state_padded *idx_to_state_padded(size_t idx)
{
	assert(rwp_state_base);
	return rwp_state_base + idx;
}

static uint8_t *idx_to_store(size_t idx)
{
	assert(rwp_store_base);
	return rwp_store_base + idx * SMALL_PAGE_SIZE;
}

static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
{
	struct fobj_rwp_paged_iv *rwp = NULL;
	tee_mm_entry_t *mm = NULL;
	size_t size = 0;

	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		goto err;
	rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE;

	memset(idx_to_state_padded(rwp->idx), 0,
	       num_pages * sizeof(struct rwp_state_padded));

	fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);

	return &rwp->fobj;
err:
	tee_mm_free(mm);
	free(rwp);

	return NULL;
}

static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rwp_paged_iv);

	return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
}

static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
					 unsigned int page_idx, void *va)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return rwp_load_page(va, &st->state, src);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);

static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
					 unsigned int page_idx, const void *va)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

	assert(page_idx < fobj->num_pages);

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down, it just hasn't had the
		 * time to call tee_pager_invalidate_fobj() yet.
		 */
		assert(TAILQ_EMPTY(&fobj->regions));
		return TEE_SUCCESS;
	}

	return rwp_save_page(va, &st->state, dst);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);

static void rwp_paged_iv_free(struct fobj *fobj)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo;
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa);

	assert(mm);

	fobj_uninit(fobj);
	tee_mm_free(mm);
	free(rwp);
}

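/*
 * Returns the virtual page holding this page's struct rwp_state so that
 * the pager can make the state available before saving or restoring the
 * page; the padding of struct rwp_state_padded guarantees that the state
 * fits within a single page.
 */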
static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
					 unsigned int page_idx)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

	assert(page_idx < fobj->num_pages);
	return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
}
DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_paged_iv
	__weak __relrodata_unpaged("ops_rwp_paged_iv") = {
	.free = rwp_paged_iv_free,
	.load_page = rwp_paged_iv_load_page,
	.save_page = rwp_paged_iv_save_page,
	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
};

static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
{
	struct fobj_rwp_unpaged_iv *rwp = NULL;
	tee_mm_entry_t *mm = NULL;
	size_t size = 0;

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	rwp->state = calloc(num_pages, sizeof(*rwp->state));
	if (!rwp->state)
		goto err_free_rwp;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err_free_state;
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		goto err_free_state;
	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, size);
	assert(rwp->store);

	fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);

	return &rwp->fobj;

err_free_state:
	free(rwp->state);
err_free_rwp:
	free(rwp);
	return NULL;
}

static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rwp_unpaged_iv);

	return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
}

static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
					   unsigned int page_idx, void *va)
{
	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return rwp_load_page(va, rwp->state + page_idx, src);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);

static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
					   unsigned int page_idx,
					   const void *va)
{
	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;

	assert(page_idx < fobj->num_pages);

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down, it just hasn't had the
		 * time to call tee_pager_invalidate_fobj() yet.
		 */
		assert(TAILQ_EMPTY(&fobj->regions));
		return TEE_SUCCESS;
	}

	return rwp_save_page(va, rwp->state + page_idx, dst);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);

static void rwp_unpaged_iv_free(struct fobj *fobj)
{
	struct fobj_rwp_unpaged_iv *rwp = NULL;
	tee_mm_entry_t *mm = NULL;

	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
		panic();

	rwp = to_rwp_unpaged_iv(fobj);
	mm = tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store));

	assert(mm);

	fobj_uninit(fobj);
	tee_mm_free(mm);
	free(rwp->state);
	free(rwp);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_unpaged_iv
	__weak __relrodata_unpaged("ops_rwp_unpaged_iv") = {
	.free = rwp_unpaged_iv_free,
	.load_page = rwp_unpaged_iv_load_page,
	.save_page = rwp_unpaged_iv_save_page,
};

static TEE_Result rwp_init(void)
{
	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
	struct fobj *fobj = NULL;
	size_t num_pool_pages = 0;
	size_t num_fobj_pages = 0;

	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
		panic("failed to generate random");
	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
				      sizeof(rwp_ae_key.data),
				      &rwp_ae_key.rounds))
		panic("failed to expand key");

	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
		return TEE_SUCCESS;

	assert(tee_mm_sec_ddr.size && !(tee_mm_sec_ddr.size & SMALL_PAGE_SIZE));

	num_pool_pages = tee_mm_sec_ddr.size / SMALL_PAGE_SIZE;
	num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
				 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

	/*
	 * Each page in the pool needs a struct rwp_state.
	 *
	 * This isn't entirely true: the pages not used by
	 * fobj_rw_paged_alloc() don't need any. A future optimization
	 * may try to avoid allocating for such pages.
	 */
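	/*
	 * Sizing example (illustrative numbers only): with a 4 MiB
	 * tee_mm_sec_ddr pool and 4 KiB pages, num_pool_pages is 1024;
	 * at 32 bytes per struct rwp_state_padded the state array needs
	 * 32 KiB, so num_fobj_pages becomes 8.
	 */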
	fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
	if (!fobj)
		panic();

	rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
	assert(rwp_state_base);

	rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM,
				      tee_mm_sec_ddr.size);
	assert(rwp_store_base);

	return TEE_SUCCESS;
}
driver_init_late(rwp_init);

struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
	assert(num_pages);

	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
		return rwp_paged_iv_alloc(num_pages);
	else
		return rwp_unpaged_iv_alloc(num_pages);
}

struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};
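/*
 * Read-only paged fobj: the content is kept in a backing store and
 * verified on load against one SHA-256 hash per page held in @hashes.
 * The content is only integrity protected and pages are never saved back.
 */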

const struct fobj_ops ops_ro_paged;

static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
		     unsigned int num_pages, void *hashes, void *store)
{
	rop->hashes = hashes;
	rop->store = store;
	fobj_init(&rop->fobj, ops, num_pages);
}

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
				 void *store)
{
	struct fobj_rop *rop = NULL;

	assert(num_pages && hashes && store);

	rop = calloc(1, sizeof(*rop));
	if (!rop)
		return NULL;

	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);

	return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_paged);

	return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_uninit(struct fobj_rop *rop)
{
	fobj_uninit(&rop->fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
	free(rop->hashes);
}

static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	rop_uninit(rop);
	free(rop);
}

static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
				       unsigned int page_idx, void *va)
{
	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&rop->fobj.refc));
	assert(page_idx < rop->fobj.num_pages);
	memcpy(va, src, SMALL_PAGE_SIZE);

	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}

static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	return rop_load_page_helper(to_rop(fobj), page_idx, va);
}
DECLARE_KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(rop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_paged
	__weak __relrodata_unpaged("ops_ro_paged") = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};

#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected, so relocation cannot be applied to pages in the
 * less secure "store" or the load_address selected by ASLR could be given
 * away. This means that each time a page has been loaded and verified it
 * has to have its relocation information applied before it can be used.
 *
 * Only relative relocations are supported; this allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that needs to be
 * updated. This number can never be larger than SMALL_PAGE_SIZE so a
 * uint16_t can be used to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 * An illustrative example of the encoding follows the struct below.
 */
struct fobj_ro_reloc_paged {
	uint16_t *page_reloc_idx;
	uint16_t *relocs;
	unsigned int num_relocs;
	struct fobj_rop rop;
};
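/*
 * Illustrative example (made-up offsets, assuming 4 KiB pages): a two-page
 * fobj with relative relocations at fobj offsets 0x10, 0x7f8 and 0x1020
 * would be encoded as relocs[] = { 0x10, 0x7f8, 0x20 },
 * page_reloc_idx[] = { 0, 2 } and num_relocs = 3. The relocations for a
 * page run from page_reloc_idx[page_idx] up to the next page's index, or
 * to num_relocs for the last page with relocations.
 */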

const struct fobj_ops ops_ro_reloc_paged;

static unsigned int get_num_rels(unsigned int num_pages,
				 unsigned int reloc_offs,
				 const uint32_t *reloc, unsigned int num_relocs)
{
	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
	unsigned int nrels = 0;
	unsigned int n = 0;
	vaddr_t offs = 0;

	/*
	 * Count the number of relocations which are needed for these
	 * pages. Also check that the data is well formed: only expected
	 * relocations, sorted in order of the address to which each
	 * applies.
	 */
	for (; n < num_relocs; n++) {
		assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
		assert(offs < reloc[n]); /* check that it's sorted */
		offs = reloc[n];
		if (offs >= reloc_offs &&
		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
			nrels++;
	}

	return nrels;
}

static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
		      const uint32_t *reloc, unsigned int num_relocs)
{
	unsigned int npg = rrp->rop.fobj.num_pages;
	unsigned int pg_idx = 0;
	unsigned int reln = 0;
	unsigned int n = 0;
	uint32_t r = 0;

	for (n = 0; n < npg; n++)
		rrp->page_reloc_idx[n] = UINT16_MAX;

	for (n = 0; n < num_relocs; n++) {
		if (reloc[n] < reloc_offs)
			continue;

		/* r is the offset from beginning of this fobj */
		r = reloc[n] - reloc_offs;

		pg_idx = r / SMALL_PAGE_SIZE;
		if (pg_idx >= npg)
			break;

		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
			rrp->page_reloc_idx[pg_idx] = reln;
		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
		reln++;
	}

	assert(reln == rrp->num_relocs);
}

struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
				       unsigned int reloc_offs,
				       const void *reloc,
				       unsigned int reloc_len, void *store)
{
	struct fobj_ro_reloc_paged *rrp = NULL;
	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
	unsigned int nrels = 0;

	assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
	assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
	assert(num_pages && hashes && store);
	if (!reloc_len) {
		assert(!reloc);
		return fobj_ro_paged_alloc(num_pages, hashes, store);
	}
	assert(reloc);

	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
	if (!nrels)
		return fobj_ro_paged_alloc(num_pages, hashes, store);

	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
			nrels * sizeof(uint16_t));
	if (!rrp)
		return NULL;
	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
	rrp->relocs = rrp->page_reloc_idx + num_pages;
	rrp->num_relocs = nrels;
	init_rels(rrp, reloc_offs, reloc, num_relocs);

	return &rrp->rop.fobj;
}

static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_reloc_paged);

	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

static void rrp_free(struct fobj *fobj)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

	rop_uninit(&rrp->rop);
	free(rrp);
}

static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
	unsigned int end_rel = rrp->num_relocs;
	TEE_Result res = TEE_SUCCESS;
	unsigned long *where = NULL;
	unsigned int n = 0;

	res = rop_load_page_helper(&rrp->rop, page_idx, va);
	if (res)
		return res;

	/* Find the reloc index of the next page to tell when we're done */
	for (n = page_idx + 1; n < fobj->num_pages; n++) {
		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
			end_rel = rrp->page_reloc_idx[n];
			break;
		}
	}

	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
		where = (void *)((vaddr_t)va + rrp->relocs[n]);
		*where += boot_mmu_config.load_offset;
	}

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(rrp_load_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_reloc_paged
	__weak __relrodata_unpaged("ops_ro_reloc_paged") = {
	.free = rrp_free,
	.load_page = rrp_load_page,
	.save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

const struct fobj_ops ops_locked_paged;

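/*
 * Locked paged fobjs have no backing store: pages are zero-filled when
 * loaded and are never saved back (lop_save_page() just returns
 * TEE_ERROR_GENERIC); they are meant for pages that the pager keeps
 * locked in memory once in use.
 */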
struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(lop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_locked_paged
	__weak __relrodata_unpaged("ops_locked_paged") = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

struct fobj_sec_mem {
	tee_mm_entry_t *mm;
	struct fobj fobj;
};

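/*
 * Without paging of user TAs a fobj is simply backed by a physically
 * contiguous allocation from tee_mm_sec_ddr: there is nothing to load or
 * save, so the only operations provided are free and get_pa.
 */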
const struct fobj_ops ops_sec_mem;

struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM, size);
	if (!va)
		goto err;

	memtag_clear_mem(va, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_sec_mem __weak __relrodata_unpaged("ops_sec_mem") = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*CFG_PAGED_USER_TA*/