// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (c) 2018, EPAM Systems. All rights reserved. */

#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
};

/* Guest partition currently active on each core, or NULL if none is set */
static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

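/*
 * Return the partition active on the current core. Foreign interrupts are
 * masked so the core position and the per-core entry are read consistently.
 */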
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

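/* Make @prtn the active partition on the current core (NULL to clear it) */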
static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

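/*
 * Each guest receives an equal share of TA RAM, minus room for its private
 * copy of the TEE .data/.bss sections and its translation tables.
 */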
static size_t get_ta_ram_size(void)
{
	return ROUNDDOWN(TA_RAM_SIZE / CFG_VIRT_GUEST_COUNT -
			 VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

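/*
 * Build a per-guest copy of the nexus memory map, with the TEE RW region
 * redirected to @tee_data and a TA RAM entry appended at @ta_ram.
 */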
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that at the time of operation,
	 * kmemory_map (aka static_memory_map from core_mmu.c)
	 * will not be altered. This is true, because all
	 * changes to static_memory_map are done during
	 * OP-TEE initialization, while this function will be
	 * called when the hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

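/*
 * Create the page pool that covers all secure RAM, carve out the areas
 * already used by the OP-TEE core, and remember the nexus memory map.
 */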
void virt_init_memory(struct tee_mmap_region *memory_map)
{
	struct tee_mmap_region *map;

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, TEE_RAM_START,
			 TA_RAM_START + TA_RAM_SIZE - TEE_RAM_START,
			 SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %x to %x",
	     TEE_RAM_START, TA_RAM_START + TA_RAM_SIZE);

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

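/*
 * Allocate the guest's private TEE RAM (.data/.bss), TA RAM and translation
 * tables, build its memory map and initialize its MMU partition. The guest's
 * copy of .data/.bss is initialized after switching to the guest's mappings.
 */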
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

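/*
 * Called when the hypervisor reports that a new guest was created: sets up
 * the guest partition, runs thread initialization and the preinitcalls in
 * its context, and adds it to the global partition list.
 */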
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res) {
		nex_free(prtn);
		return res;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;
}

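/*
 * Called when the hypervisor reports that a guest was destroyed: removes the
 * partition from the list, checks that no guest threads still hold a
 * reference, and releases all of the partition's resources.
 */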
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) are still running. refc = %d",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else
		EMSG("Client with id %d was not found", guest_id);

	return TEE_SUCCESS;
}

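/*
 * Activate the partition belonging to @guest_id on the current core and take
 * a reference on it. Calls on behalf of the hypervisor itself (HYP_CLNT_ID)
 * succeed without activating any partition.
 */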
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return TEE_SUCCESS;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (guest_id == HYP_CLNT_ID)
		return TEE_SUCCESS;
	return TEE_ERROR_ITEM_NOT_FOUND;
}

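/*
 * Deactivate the current guest partition, switch back to the default
 * mappings and drop the reference taken by virt_set_guest().
 */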
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

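/*
 * Lazily initialize the TEE runtime for this guest on its first standard
 * call, using double-checked locking on the partition mutex.
 */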
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}