// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
 */
#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>

#define NUM_SHARES 64

static bitstr_t bit_decl(share_bits, NUM_SHARES);
static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;

/* mem_shares stores all active FF-A shares. */
SLIST_HEAD(sp_mem_head, sp_mem);
static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
static const struct mobj_ops mobj_sp_ops;

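/*
 * mobj backing the physical pages of an FF-A memory share. The pages[]
 * flexible array holds one physical address per small page.
 */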
struct mobj_sp {
	struct mobj mobj;
	uint32_t mem_type;
	bool is_secure;
	paddr_t pages[];
};

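/* The downcast is only valid for mobjs created by sp_mem_new_mobj(). */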
static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_sp_ops);
	return container_of(mobj, struct mobj_sp, mobj);
}

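/* Size of a struct mobj_sp tracking @num_pages pages, or 0 on overflow. */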
static size_t mobj_sp_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_sp), s, &s))
		return 0;
	return s;
}

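/* Allocate a new mobj_sp able to hold @pages physical page addresses. */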
struct mobj *sp_mem_new_mobj(uint64_t pages, uint32_t mem_type, bool is_secure)
{
	struct mobj_sp *m = NULL;
	size_t s = 0;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	m->mobj.size = pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	m->mem_type = mem_type;
	m->is_secure = is_secure;

	refcount_set(&m->mobj.refc, 1);
	return &m->mobj;
}

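/* Number of small pages covered by the mobj. */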
static size_t get_page_count(struct mobj_sp *ms)
{
	return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

/* Add physical pages to the mobj; *idx is advanced past the added pages. */
int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
		     paddr_t pa, unsigned int num_pages)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(ms);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Don't check for device memory */
	if (ms->mem_type == TEE_MATTR_MEM_TYPE_CACHED) {
		if (ms->is_secure) {
			if (!tee_pbuf_is_sec(pa, num_pages * SMALL_PAGE_SIZE))
				return TEE_ERROR_BAD_PARAMETERS;
		} else {
			if (!tee_pbuf_is_non_sec(pa,
						 num_pages * SMALL_PAGE_SIZE))
				return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	for (n = 0; n < num_pages; n++)
		ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	*idx += n;
	return TEE_SUCCESS;
}

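/* Report the memory type the mobj was created with (mobj_ops::get_mem_type). */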
static TEE_Result get_mem_type(struct mobj *mobj, uint32_t *mt)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	*mt = m->mem_type;

	return TEE_SUCCESS;
}

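/*
 * Secure mobjs only match CORE_MEM_SEC, non-secure ones match
 * CORE_MEM_NON_SEC and CORE_MEM_REG_SHM.
 */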
static bool mobj_sp_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	if (m->is_secure)
		return attr == CORE_MEM_SEC;
	else
		return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

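/*
 * Translate an offset into the mobj to a physical address. Only a granule
 * of 0 (byte accurate) or SMALL_PAGE_SIZE is supported.
 */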
static TEE_Result get_pa(struct mobj *mobj, size_t offset,
			 size_t granule, paddr_t *pa)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	switch (granule) {
	case 0:
		p = ms->pages[offset / SMALL_PAGE_SIZE] +
		    (offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = ms->pages[offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(get_pa);

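/* This mobj type doesn't use an extra offset into the first granule. */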
static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
			    size_t granule __maybe_unused)
{
	return 0;
}

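/* mobj_ops::free(): called when the last reference to the mobj is dropped. */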
static void inactivate(struct mobj *mobj)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	/*
	 * If the refcount isn't 0, some other thread found this mobj in
	 * mem_shares after the mobj_put() that brought us here and before
	 * we took the lock, so it must not be freed yet.
	 */
	if (!refcount_val(&mobj->refc))
		free(ms);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

static const struct mobj_ops mobj_sp_ops = {
	.get_pa = get_pa,
	.get_phys_offs = get_phys_offs,
	.get_mem_type = get_mem_type,
	.matches = mobj_sp_matches,
	.free = inactivate,
};

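/* Look up the receiver with endpoint ID @s_id in @smem, NULL if not found. */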
struct sp_mem_receiver *sp_mem_get_receiver(uint32_t s_id, struct sp_mem *smem)
{
	struct sp_mem_receiver *r = NULL;

	SLIST_FOREACH(r, &smem->receivers, link) {
		if (r->perm.endpoint_id == s_id)
			return r;
	}
	return NULL;
}

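/* Find an active share by its FF-A global handle, NULL if none matches. */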
struct sp_mem *sp_mem_get(uint64_t handle)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_FOREACH(smem, &mem_shares, link) {
		if (smem->global_handle == handle)
			break;
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return smem;
}

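/*
 * Return the VA where (@mobj, @offset) is mapped in @uctx, or NULL if that
 * combination isn't mapped in the context.
 */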
void *sp_mem_get_va(const struct user_mode_ctx *uctx, size_t offset,
		    struct mobj *mobj)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (region->mobj == mobj && region->offset == offset)
			return (void *)region->va;
	}
	return NULL;
}

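/*
 * Allocate a new sp_mem descriptor and reserve a global handle for it.
 * Returns NULL when allocation fails or all NUM_SHARES handles are in use.
 */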
struct sp_mem *sp_mem_new(void)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	smem = calloc(sizeof(*smem), 1);
	if (!smem)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	bit_ffc(share_bits, NUM_SHARES, &i);
	if (i == -1) {
		cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
		free(smem);
		return NULL;
	}

	bit_set(share_bits, i);
	/*
	 * Handles for OP-TEE (normal world) shares have bit 44 set, so set
	 * bit 45 (FFA_MEMORY_HANDLE_SECURE_BIT) instead for SP shares.
	 */
	smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
	SLIST_INIT(&smem->regions);
	SLIST_INIT(&smem->receivers);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}

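/* Make the share visible to sp_mem_get() and sp_mem_is_shared(). */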
void sp_mem_add(struct sp_mem *smem)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_INSERT_HEAD(&mem_shares, smem, link);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

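/*
 * Check whether @new_reg overlaps any region of an already active share
 * backed by the same mobj.
 */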
bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	uint64_t new_reg_end = new_reg->page_offset +
			       (new_reg->page_count * SMALL_PAGE_SIZE);

	SLIST_FOREACH(smem, &mem_shares, link) {
		struct sp_mem_map_region *reg = NULL;

		SLIST_FOREACH(reg, &smem->regions, link) {
			if (new_reg->mobj == reg->mobj) {
				uint64_t reg_end = 0;

				reg_end = reg->page_offset +
					  (reg->page_count * SMALL_PAGE_SIZE);

				if (new_reg->page_offset < reg_end &&
				    new_reg_end > reg->page_offset) {
					cpu_spin_unlock_xrestore(&sp_mem_lock,
								 exceptions);
					return true;
				}
			}
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return false;
}

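/*
 * Tear down a share: free its receivers and regions, release its handle
 * and remove it from the list of active shares.
 */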
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}