// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>

static struct dt_descriptor external_dt __nex_bss;

#if defined(CFG_CORE_FFA)
static void *manifest_dt __nex_bss;
#endif

const struct dt_driver *dt_find_compatible_driver(const void *fdt, int offs)
{
	const struct dt_device_match *dm;
	const struct dt_driver *drv;

	for_each_dt_driver(drv) {
		for (dm = drv->match_table; dm; dm++) {
			if (!dm->compatible) {
				break;
			}
			if (!fdt_node_check_compatible(fdt, offs,
						       dm->compatible)) {
				return drv;
			}
		}
	}

	return NULL;
}
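
/*
 * Example (illustrative): probing a node against the registered drivers.
 * "my_fdt" and "node" are hypothetical values obtained elsewhere.
 *
 *	const struct dt_driver *drv = dt_find_compatible_driver(my_fdt, node);
 *
 *	if (drv)
 *		DMSG("Node matches driver %s", drv->name);
 */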

bool dt_have_prop(const void *fdt, int offs, const char *propname)
{
	const void *prop;

	prop = fdt_getprop(fdt, offs, propname, NULL);

	return prop;
}

int dt_disable_status(void *fdt, int node)
{
	const char *prop = NULL;
	int len = 0;

	prop = fdt_getprop(fdt, node, "status", &len);
	if (!prop) {
		if (fdt_setprop_string(fdt, node, "status", "disabled"))
			return -1;
	} else {
		/*
		 * The status property exists, so modify it.
		 * Ask to set the "disabled" value for the property. The
		 * value is automatically truncated to "len" bytes by
		 * fdt_setprop_inplace().
		 * Setting any value different from "ok" or "okay"
		 * disables the node.
		 * Writing "disabled" truncated to the original property
		 * length is preferred: it does not increase the DT size
		 * and avoids recalculating the overall DT offsets.
		 * If the original length of the status property is larger
		 * than "disabled", the property will start with "disabled"
		 * and be completed with the rest of the original property.
		 */
		if (fdt_setprop_inplace(fdt, node, "status", "disabled", len))
			return -1;
	}

	return 0;
}

int dt_enable_secure_status(void *fdt, int node)
{
	if (dt_disable_status(fdt, node)) {
		EMSG("Unable to disable Normal Status");
		return -1;
	}

	if (fdt_setprop_string(fdt, node, "secure-status", "okay"))
		return -1;

	return 0;
}
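
/*
 * Illustrative result of dt_enable_secure_status() on a hypothetical node
 * (DTS view): the normal-world status is overwritten with a (possibly
 * truncated) "disabled" string and a secure-status property is added.
 *
 *	serial@40011000 {
 *		status = "disabled";
 *		secure-status = "okay";
 *	};
 */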

int dt_map_dev(const void *fdt, int offs, vaddr_t *base, size_t *size,
	       enum dt_map_dev_directive mapping)
{
	enum teecore_memtypes mtype;
	paddr_t pbase;
	vaddr_t vbase;
	size_t sz;
	int st;

	assert(cpu_mmu_enabled());

	st = fdt_get_status(fdt, offs);
	if (st == DT_STATUS_DISABLED)
		return -1;

	if (fdt_reg_info(fdt, offs, &pbase, &sz))
		return -1;

	switch (mapping) {
	case DT_MAP_AUTO:
		if ((st & DT_STATUS_OK_SEC) && !(st & DT_STATUS_OK_NSEC))
			mtype = MEM_AREA_IO_SEC;
		else
			mtype = MEM_AREA_IO_NSEC;
		break;
	case DT_MAP_SECURE:
		mtype = MEM_AREA_IO_SEC;
		break;
	case DT_MAP_NON_SECURE:
		mtype = MEM_AREA_IO_NSEC;
		break;
	default:
		panic("Invalid mapping specified");
		break;
	}

	/* Check if we have a mapping, create one if needed */
	vbase = (vaddr_t)core_mmu_add_mapping(mtype, pbase, sz);
	if (!vbase) {
		EMSG("Failed to map %zu bytes at PA 0x%"PRIxPA,
		     (size_t)sz, pbase);
		return -1;
	}

	*base = vbase;
	*size = sz;
	return 0;
}
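
/*
 * Example (illustrative): mapping a device node before driver init.
 * "fdt", "node" and SOME_REG_OFFSET/SOME_VALUE are hypothetical.
 *
 *	vaddr_t va = 0;
 *	size_t sz = 0;
 *
 *	if (dt_map_dev(fdt, node, &va, &sz, DT_MAP_AUTO))
 *		return TEE_ERROR_GENERIC;
 *	io_write32(va + SOME_REG_OFFSET, SOME_VALUE);
 */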

/* Read a physical address (n=1 or 2 cells) */
static paddr_t fdt_read_paddr(const uint32_t *cell, int n)
{
	paddr_t addr;

	if (n < 1 || n > 2)
		goto bad;

	addr = fdt32_to_cpu(*cell);
	cell++;
	if (n == 2) {
#ifdef ARM32
		if (addr) {
			/* High order 32 bits can't be nonzero */
			goto bad;
		}
		addr = fdt32_to_cpu(*cell);
#else
		addr = (addr << 32) | fdt32_to_cpu(*cell);
#endif
	}

	return addr;
bad:
	return DT_INFO_INVALID_REG;
}

static size_t fdt_read_size(const uint32_t *cell, int n)
{
	uint32_t sz = 0;

	sz = fdt32_to_cpu(*cell);
	if (n == 2) {
		if (sz)
			return DT_INFO_INVALID_REG_SIZE;

		cell++;
		sz = fdt32_to_cpu(*cell);
	}

	return sz;
}

int fdt_reg_info(const void *fdt, int offs, paddr_t *base, size_t *size)
{
	const fdt32_t *reg = NULL;
	int addr_ncells = 0;
	int size_ncells = 0;
	int parent = 0;
	int len = 0;

	reg = (const uint32_t *)fdt_getprop(fdt, offs, "reg", &len);
	if (!reg)
		return -FDT_ERR_NOTFOUND;

	if (fdt_find_cached_parent_reg_cells(fdt, offs, &addr_ncells,
					     &size_ncells) != 0) {
		parent = fdt_parent_offset(fdt, offs);
		if (parent < 0)
			return -FDT_ERR_NOTFOUND;

		addr_ncells = fdt_address_cells(fdt, parent);
		if (addr_ncells < 0)
			return -FDT_ERR_NOTFOUND;

		size_ncells = fdt_size_cells(fdt, parent);
		if (size_ncells < 0)
			return -FDT_ERR_NOTFOUND;
	}

	if ((size_t)len < addr_ncells * sizeof(*reg))
		return -FDT_ERR_BADSTRUCTURE;

	if (base) {
		*base = fdt_read_paddr(reg, addr_ncells);
		if (*base == DT_INFO_INVALID_REG)
			return -FDT_ERR_NOTFOUND;
	}

	if (size) {
		if ((size_t)len < (addr_ncells + size_ncells) * sizeof(*reg))
			return -FDT_ERR_BADSTRUCTURE;

		*size = fdt_read_size(reg + addr_ncells, size_ncells);
		if (*size == DT_INFO_INVALID_REG_SIZE)
			return -FDT_ERR_NOTFOUND;
	}

	return 0;
}
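
/*
 * Illustrative input for fdt_reg_info() (DTS view, hypothetical node whose
 * parent has #address-cells = <1> and #size-cells = <1>):
 *
 *	timer@40012000 {
 *		reg = <0x40012000 0x400>;
 *	};
 *
 * yields *base = 0x40012000 and *size = 0x400.
 */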

paddr_t fdt_reg_base_address(const void *fdt, int offs)
{
	paddr_t base = 0;

	if (fdt_reg_info(fdt, offs, &base, NULL))
		return DT_INFO_INVALID_REG;

	return base;
}

size_t fdt_reg_size(const void *fdt, int offs)
{
	size_t size = 0;

	if (fdt_reg_info(fdt, offs, NULL, &size))
		return DT_INFO_INVALID_REG_SIZE;

	return size;
}

static bool is_okay(const char *st, int len)
{
	return !strncmp(st, "ok", len) || !strncmp(st, "okay", len);
}

int fdt_get_status(const void *fdt, int offs)
{
	const char *prop;
	int st = 0;
	int len;

	prop = fdt_getprop(fdt, offs, "status", &len);
	if (!prop || is_okay(prop, len)) {
		/* If status is not specified, it defaults to "okay" */
		st |= DT_STATUS_OK_NSEC;
	}

	prop = fdt_getprop(fdt, offs, "secure-status", &len);
	if (!prop) {
		/*
		 * When secure-status is not specified it defaults to the same
		 * value as status
		 */
		if (st & DT_STATUS_OK_NSEC)
			st |= DT_STATUS_OK_SEC;
	} else {
		if (is_okay(prop, len))
			st |= DT_STATUS_OK_SEC;
	}

	return st;
}

void fdt_fill_device_info(const void *fdt, struct dt_node_info *info, int offs)
{
	struct dt_node_info dinfo = {
		.reg = DT_INFO_INVALID_REG,
		.reg_size = DT_INFO_INVALID_REG_SIZE,
		.clock = DT_INFO_INVALID_CLOCK,
		.reset = DT_INFO_INVALID_RESET,
		.interrupt = DT_INFO_INVALID_INTERRUPT,
	};
	const fdt32_t *cuint = NULL;

	/* Intentionally discard fdt_reg_info() return value */
	fdt_reg_info(fdt, offs, &dinfo.reg, &dinfo.reg_size);

	cuint = fdt_getprop(fdt, offs, "clocks", NULL);
	if (cuint) {
		cuint++;
		dinfo.clock = (int)fdt32_to_cpu(*cuint);
	}

	cuint = fdt_getprop(fdt, offs, "resets", NULL);
	if (cuint) {
		cuint++;
		dinfo.reset = (int)fdt32_to_cpu(*cuint);
	}

	dinfo.interrupt = dt_get_irq_type_prio(fdt, offs, &dinfo.type,
					       &dinfo.prio);

	dinfo.status = fdt_get_status(fdt, offs);

	*info = dinfo;
}

int fdt_read_uint32_array(const void *fdt, int node, const char *prop_name,
			  uint32_t *array, size_t count)
{
	const fdt32_t *cuint = NULL;
	int len = 0;
	uint32_t i = 0;

	cuint = fdt_getprop(fdt, node, prop_name, &len);
	if (!cuint)
		return len;

	if ((uint32_t)len != (count * sizeof(uint32_t)))
		return -FDT_ERR_BADLAYOUT;

	for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++) {
		*array = fdt32_to_cpu(*cuint);
		array++;
		cuint++;
	}

	return 0;
}
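
/*
 * Example (illustrative): reading a fixed-size cell array from a
 * hypothetical property "vendor,thresholds = <10 20 30>;":
 *
 *	uint32_t thresholds[3] = { };
 *
 *	if (fdt_read_uint32_array(fdt, node, "vendor,thresholds",
 *				  thresholds, ARRAY_SIZE(thresholds)))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */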

int fdt_read_uint32_index(const void *fdt, int node, const char *prop_name,
			  int index, uint32_t *value)
{
	const fdt32_t *cuint = NULL;
	int len = 0;

	cuint = fdt_getprop(fdt, node, prop_name, &len);
	if (!cuint)
		return len;

	if ((uint32_t)len < (sizeof(uint32_t) * (index + 1)))
		return -FDT_ERR_BADLAYOUT;

	*value = fdt32_to_cpu(cuint[index]);

	return 0;
}

int fdt_read_uint32(const void *fdt, int node, const char *prop_name,
		    uint32_t *value)
{
	return fdt_read_uint32_array(fdt, node, prop_name, value, 1);
}

uint32_t fdt_read_uint32_default(const void *fdt, int node,
				 const char *prop_name, uint32_t dflt_value)
{
	uint32_t ret = dflt_value;

	fdt_read_uint32_index(fdt, node, prop_name, 0, &ret);

	return ret;
}

int fdt_get_reg_props_by_index(const void *fdt, int node, int index,
			       paddr_t *base, size_t *size)
{
	const fdt32_t *prop = NULL;
	int parent = 0;
	int len = 0;
	int address_cells = 0;
	int size_cells = 0;
	int cell = 0;

	parent = fdt_parent_offset(fdt, node);
	if (parent < 0)
		return parent;

	address_cells = fdt_address_cells(fdt, parent);
	if (address_cells < 0)
		return address_cells;

	size_cells = fdt_size_cells(fdt, parent);
	if (size_cells < 0)
		return size_cells;

	cell = index * (address_cells + size_cells);

	prop = fdt_getprop(fdt, node, "reg", &len);
	if (!prop)
		return len;

	if (((cell + address_cells + size_cells) * (int)sizeof(uint32_t)) > len)
		return -FDT_ERR_BADVALUE;

	if (base) {
		*base = fdt_read_paddr(&prop[cell], address_cells);
		if (*base == DT_INFO_INVALID_REG)
			return -FDT_ERR_BADVALUE;
	}

	if (size) {
		*size = fdt_read_size(&prop[cell + address_cells], size_cells);
		if (*size == DT_INFO_INVALID_REG_SIZE)
			return -FDT_ERR_BADVALUE;
	}

	return 0;
}

int fdt_get_reg_props_by_name(const void *fdt, int node, const char *name,
			      paddr_t *base, size_t *size)
{
	int index = 0;

	index = fdt_stringlist_search(fdt, node, "reg-names", name);
	if (index < 0)
		return index;

	return fdt_get_reg_props_by_index(fdt, node, index, base, size);
}
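
/*
 * Example (illustrative): a hypothetical node exposing two register banks,
 * with #address-cells = <1> and #size-cells = <1> in the parent:
 *
 *	crypto@50060000 {
 *		reg = <0x50060000 0x1000>, <0x50061000 0x400>;
 *		reg-names = "ctrl", "fifo";
 *	};
 *
 * fdt_get_reg_props_by_name(fdt, node, "fifo", &base, &size) resolves the
 * "reg-names" index and returns base 0x50061000 and size 0x400.
 */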

int dt_getprop_as_number(const void *fdt, int nodeoffset, const char *name,
			 uint64_t *num)
{
	const void *prop = NULL;
	int len = 0;

	prop = fdt_getprop(fdt, nodeoffset, name, &len);
	if (!prop)
		return len;

	switch (len) {
	case sizeof(uint32_t):
		*num = fdt32_ld(prop);
		return 0;
	case sizeof(uint64_t):
		*num = fdt64_ld(prop);
		return 0;
	default:
		return -FDT_ERR_BADVALUE;
	}
}
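
/*
 * Example (illustrative): reading a hypothetical 64-bit property
 * "vendor,mem-base = /bits/ 64 <0x880000000>;":
 *
 *	uint64_t mem_base = 0;
 *
 *	if (dt_getprop_as_number(fdt, node, "vendor,mem-base", &mem_base))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */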

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	if (!fdt)
		fdt = get_manifest_dt();

	return fdt;
}

void *get_secure_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
		fdt = get_external_dt();

	if (!fdt)
		fdt = get_manifest_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
#ifdef CFG_DT_CACHED_NODE_INFO
/*
 * struct cached_node - Cached information of a DT node
 *
 * @node_offset: Offset of the node in @cached_node_info_fdt
 * @parent_offset: Offset of @node_offset parent node
 * @address_cells: #address-cells property value of the parent node or 0
 * @size_cells: #size-cells property value of the parent node or 0
 * @phandle: Phandle associated to the node or 0 if none
 */
struct cached_node {
	int node_offset;
	int parent_offset;
	int8_t address_cells;
	int8_t size_cells;
	uint32_t phandle;
};

/*
 * struct dt_node_cache - Reference to cached information of DT nodes
 *
 * @array: Array of cached nodes
 * @count: Number of initialized cells in @array
 * @alloced_count: Number of allocated cells in @array
 * @fdt: Reference to the FDT for which node information is cached
 */
struct dt_node_cache {
	struct cached_node *array;
	size_t count;
	size_t alloced_count;
	const void *fdt;
};

static struct dt_node_cache *dt_node_cache;

static bool fdt_node_info_are_cached(const void *fdt)
{
	return dt_node_cache && dt_node_cache->fdt == fdt;
}

static struct cached_node *find_cached_parent_node(const void *fdt,
						   int node_offset)
{
	struct cached_node *cell = NULL;
	size_t n = 0;

	if (!fdt_node_info_are_cached(fdt))
		return NULL;

	for (n = 0; n < dt_node_cache->count; n++)
		if (dt_node_cache->array[n].node_offset == node_offset)
			cell = dt_node_cache->array + n;

	return cell;
}

int fdt_find_cached_parent_node(const void *fdt, int node_offset,
				int *parent_offset)
{
	struct cached_node *cell = NULL;

	cell = find_cached_parent_node(fdt, node_offset);
	if (!cell)
		return -FDT_ERR_NOTFOUND;

	*parent_offset = cell->parent_offset;

	return 0;
}

int fdt_find_cached_parent_reg_cells(const void *fdt, int node_offset,
				     int *address_cells, int *size_cells)
{
	struct cached_node *cell = NULL;
	int rc = 0;

	cell = find_cached_parent_node(fdt, node_offset);
	if (!cell)
		return -FDT_ERR_NOTFOUND;

	if (address_cells) {
		if (cell->address_cells >= 0)
			*address_cells = cell->address_cells;
		else
			rc = -FDT_ERR_NOTFOUND;
	}

	if (size_cells) {
		if (cell->size_cells >= 0)
			*size_cells = cell->size_cells;
		else
			rc = -FDT_ERR_NOTFOUND;
	}

	return rc;
}

int fdt_find_cached_node_phandle(const void *fdt, uint32_t phandle,
				 int *node_offset)
{
	struct cached_node *cell = NULL;
	size_t n = 0;

	if (!fdt_node_info_are_cached(fdt))
		return -FDT_ERR_NOTFOUND;

	for (n = 0; n < dt_node_cache->count; n++)
		if (dt_node_cache->array[n].phandle == phandle)
			cell = dt_node_cache->array + n;

	if (!cell)
		return -FDT_ERR_NOTFOUND;

	*node_offset = cell->node_offset;

	return 0;
}

static TEE_Result realloc_cached_node_array(void)
{
	assert(dt_node_cache);

	if (dt_node_cache->count + 1 > dt_node_cache->alloced_count) {
		size_t new_count = dt_node_cache->alloced_count * 2;
		struct cached_node *new = NULL;

		if (!new_count)
			new_count = 4;

		new = realloc(dt_node_cache->array,
			      sizeof(*dt_node_cache->array) * new_count);
		if (!new)
			return TEE_ERROR_OUT_OF_MEMORY;

		dt_node_cache->array = new;
		dt_node_cache->alloced_count = new_count;
	}

	return TEE_SUCCESS;
}

static TEE_Result add_cached_node(int parent_offset,
				  int node_offset, int address_cells,
				  int size_cells)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	res = realloc_cached_node_array();
	if (res)
		return res;

	dt_node_cache->array[dt_node_cache->count] = (struct cached_node){
		.node_offset = node_offset,
		.parent_offset = parent_offset,
		.address_cells = address_cells,
		.size_cells = size_cells,
		.phandle = fdt_get_phandle(dt_node_cache->fdt, node_offset),
	};

	dt_node_cache->count++;

	return TEE_SUCCESS;
}

static TEE_Result add_cached_node_subtree(int node_offset)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	const fdt32_t *cuint = NULL;
	int subnode_offset = 0;
	int8_t addr_cells = -1;
	int8_t size_cells = -1;

	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#address-cells",
			    NULL);
	if (cuint)
		addr_cells = (int)fdt32_to_cpu(*cuint);

	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#size-cells",
			    NULL);
	if (cuint)
		size_cells = (int)fdt32_to_cpu(*cuint);

	fdt_for_each_subnode(subnode_offset, dt_node_cache->fdt, node_offset) {
		res = add_cached_node(node_offset, subnode_offset, addr_cells,
				      size_cells);
		if (res)
			return res;

		res = add_cached_node_subtree(subnode_offset);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

static TEE_Result release_node_cache_info(void)
{
	if (dt_node_cache) {
		free(dt_node_cache->array);
		free(dt_node_cache);
		dt_node_cache = NULL;
	}

	return TEE_SUCCESS;
}

release_init_resource(release_node_cache_info);

static void init_node_cache_info(const void *fdt)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	assert(!dt_node_cache);

	dt_node_cache = calloc(1, sizeof(*dt_node_cache));
	if (dt_node_cache) {
		dt_node_cache->fdt = fdt;
		res = add_cached_node_subtree(0);
	} else {
		res = TEE_ERROR_OUT_OF_MEMORY;
	}

	if (res) {
		EMSG("Error %#"PRIx32", disable DT cached info", res);
		release_node_cache_info();
	}
}
#else
static void init_node_cache_info(const void *fdt __unused)
{
}
#endif /* CFG_DT_CACHED_NODE_INFO */

void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;

		init_node_cache_info(embedded_secure_dtb);
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#ifdef _CFG_USE_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32] = { };
	int offs = 0;
	int ret = 0;

	ret = snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	if (ret < 0 || (size_t)ret >= sizeof(frag))
		return -1;

	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return ret;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment = 0;

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		if (!fdt_check_header(dt->blob)) {
			fdt_for_each_subnode(fragment, dt->blob, 0)
				dt->frag_id += 1;
			return 0;
		}
	}

	return fdt_create_empty_tree(dt->blob, dt_size);
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* _CFG_USE_DTB_OVERLAY */

struct dt_descriptor *get_external_dt_desc(void)
{
	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return NULL;

	return &external_dt;
}

void init_external_dt(unsigned long phys_dt, size_t dt_sz)
{
	struct dt_descriptor *dt = &external_dt;
	int ret = 0;
	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return;

	if (!phys_dt || !dt_sz) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	mtype = core_mmu_get_type_by_pa(phys_dt);
	if (mtype == MEM_AREA_MAXTYPE) {
		/* Map the DTB if it is not yet mapped */
		dt->blob = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt,
						dt_sz);
		if (!dt->blob)
			panic("Failed to map external DTB");
	} else {
		/* Get the DTB address if already mapped in a memory area */
		dt->blob = phys_to_virt(phys_dt, mtype, dt_sz);
		if (!dt->blob) {
			EMSG("Failed to get a mapped external DTB for PA %#lx",
			     phys_dt);
			panic();
		}
	}

	ret = init_dt_overlay(dt, dt_sz);
	if (ret < 0) {
		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
		     ret);
		panic();
	}

	ret = fdt_open_into(dt->blob, dt->blob, dt_sz);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

void *get_external_dt(void)
{
	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return NULL;

	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;
	paddr_t pa_dt = 0;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return TEE_SUCCESS;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	pa_dt = virt_to_phys(external_dt.blob);
	/*
	 * Skip packing and un-mapping operations if the external DTB is mapped
	 * in a different memory area
	 */
	if (core_mmu_get_type_by_pa(pa_dt) != MEM_AREA_EXT_DT)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary Device Tree mapping");

	/* The external DTB is no longer reachable, reset the pointer */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}

boot_final(release_external_dt);

int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			const char *subnode)
{
	int offs = 0;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return offs;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return offs;
	return fdt_add_subnode(dt->blob, offs, subnode);
}

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return len_size;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return addr_size;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return offs;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return ret;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
		if (ret < 0)
			return ret;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return ret;
	}

	ret = snprintf(subnode_name, sizeof(subnode_name),
		       "%s@%" PRIxPA, name, pa);
	if (ret < 0 || ret >= (int)sizeof(subnode_name))
		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2] = { };

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return ret;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return ret;
	} else {
		return offs;
	}
	return 0;
}
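
/*
 * Illustrative result of add_res_mem_dt_node(dt, "optee_shm", 0x42000000,
 * 0x200000), assuming a hypothetical tree with two address and size cells
 * (DTS view):
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_shm@42000000 {
 *			reg = <0x0 0x42000000 0x0 0x200000>;
 *			no-map;
 *		};
 *	};
 */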

#if defined(CFG_CORE_FFA)
void init_manifest_dt(void *fdt)
{
	manifest_dt = fdt;
}

void reinit_manifest_dt(void)
{
	paddr_t pa = (unsigned long)manifest_dt;
	void *fdt = NULL;
	int ret = 0;

	if (!pa) {
		EMSG("No manifest DT found");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map manifest DT");

	manifest_dt = fdt;

	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
		panic();
	}

	IMSG("manifest DT found");
}

void *get_manifest_dt(void)
{
	return manifest_dt;
}

static TEE_Result release_manifest_dt(void)
{
	if (!manifest_dt)
		return TEE_SUCCESS;

	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary manifest DT mapping");
	manifest_dt = NULL;

	return TEE_SUCCESS;
}

boot_final(release_manifest_dt);
#else
void init_manifest_dt(void *fdt __unused)
{
}

void reinit_manifest_dt(void)
{
}

void *get_manifest_dt(void)
{
	return NULL;
}
#endif /*CFG_CORE_FFA*/