// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <initcall.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <mm/mobj.h>
#include <sys/queue.h>

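/*
 * struct mobj_ffa - mobj backed by FF-A shared memory
 * @mobj:	generic mobj
 * @link:	list element, the object is on either the active list
 *		(shm_head) or the inactive list (shm_inactive_head)
 * @cookie:	FF-A memory handle (global identifier) of the shared memory
 * @mm:		virtual address range reserved while the pages are mapped
 * @mapcount:	reference count of inc_map()/dec_map() calls
 * @page_offset: offset into the first page at which the buffer starts
 * @registered_by_cookie:	set while the object is activated via
 *				mobj_ffa_get_by_cookie() (SEL1 SPMC only)
 * @unregistered_by_cookie:	set once mobj_ffa_unregister_by_cookie()
 *				has been called (SEL1 SPMC only)
 * @pages:	physical address of each page of the shared memory
 */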
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS 64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

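/*
 * Shared memory objects that are currently in use (nonzero refcount) are
 * kept on shm_head, the others on shm_inactive_head. Both lists, and the
 * cookie bitmap above, are protected by shm_lock.
 */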
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

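/* Returns the allocation size needed for num_pages pages, or 0 on overflow */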
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

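/*
 * Allocates and partially initializes a struct mobj_ffa large enough to
 * hold num_pages page addresses. The cookie and the page list are filled
 * in by the callers.
 */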
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
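/*
 * Allocates a new mobj_ffa and assigns it a free cookie from the shm_bits
 * bitmap. Used when OP-TEE itself is the S-EL1 SPMC and allocates the
 * memory handles.
 */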
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Setting bit 44 to use one of the upper 32 bits too for
		 * testing.
		 */
		mf->cookie = i | FFA_MEMORY_HANDLE_NON_SECURE_BIT;
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

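/*
 * Removes and returns the first element in the list for which cmp_func()
 * matches val, or NULL if there is no match.
 */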
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#if defined(CFG_CORE_SEL1_SPMC)
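/*
 * Frees a mobj_ffa created with mobj_ffa_sel1_spmc_new() and releases its
 * cookie in the shm_bits bitmap.
 */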
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
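/*
 * With an SPMC at a higher exception level the memory handle (cookie) is
 * allocated by that SPMC and supplied by the caller rather than taken
 * from a local bitmap.
 */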
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

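/*
 * Records num_pages physical pages starting at pa into mf->pages[] at
 * index *idx and advances *idx. The pages must be non-secure memory.
 */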
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

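/*
 * Inserts a fully initialized mobj_ffa on the inactive list and returns
 * its cookie. The object must not already be on any of the lists.
 */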
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

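/* Unmaps the pages and releases the reserved virtual address range, if any */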
static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
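/*
 * Reclaims the shared memory identified by cookie on behalf of the normal
 * world. This only succeeds if the mobj is inactive and isn't currently
 * registered via mobj_ffa_get_by_cookie().
 */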
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

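/*
 * Drops the "registered by cookie" state of an inactive mobj. With an
 * S-EL1 SPMC the object is kept so the memory can be reclaimed later,
 * otherwise the object is deleted and the memory relinquished to the SPMC.
 */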
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * If the mobj isn't found or if it already has been unregistered.
	 */
#if defined(CFG_CORE_SEL1_SPMC)
	if (!mf || mf->unregistered_by_cookie) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf->unregistered_by_cookie = true;
#else
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

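/*
 * Finds the mobj matching cookie and returns it with an increased
 * refcount, moving it from the inactive to the active list if needed.
 * internal_offs is the offset into the first page at which the shared
 * buffer starts.
 */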
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If refcount is 0, some other thread has
				 * called mobj_put() and the count reached
				 * 0, but we found the mobj before
				 * ffa_inactivate() got the lock. Let's
				 * reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %d",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if !defined(CFG_CORE_SEL1_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);

			/*
			 * mf->page_offset is the offset into the first
			 * page. It is assigned from the internal_offs
			 * parameter to this function.
			 *
			 * While a mobj_ffa is active (ref_count > 0) this
			 * will not change, but when the mobj is pushed to
			 * the inactive list it can be changed again.
			 *
			 * So below we're backing out the old
			 * mf->page_offset and then assigning a new value
			 * from internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

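/*
 * Returns the physical address at offset into the mobj, rounded down to
 * granule (either 0 for no rounding or SMALL_PAGE_SIZE).
 */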
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

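/*
 * Called via mobj_put() when the refcount drops to 0: moves the mobj from
 * the active to the inactive list and unmaps it, unless another thread
 * has resurrected it in the meantime.
 */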
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

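/*
 * Increases the map count, mapping the pages into the shared memory
 * virtual address space on the 0 -> 1 transition.
 */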
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&mf->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&mf->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling ffa_dec_map() to the
	 * lock, the pages are still mapped (mf->mm is set) and we only
	 * need to reinitialize mapcount to 1.
	 */
	if (!mf->mm) {
		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
		if (!mf->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(mf->mm);
			mf->mm = NULL;
			goto out;
		}
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

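/* Decreases the map count and unmaps the pages when it reaches 0 */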
static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	if (!refcount_val(&mf->mapcount))
		unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

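/*
 * Initializes the tee_mm_shm pool covering the virtual address space used
 * for mapping FF-A shared memory.
 */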
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_ops = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_mem_type = ffa_get_mem_type,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);