1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2017-2024, STMicroelectronics
4 * Copyright (c) 2016-2018, Linaro Limited
5 */
6
7 #include <boot_api.h>
8 #include <config.h>
9 #include <console.h>
10 #include <drivers/firewall_device.h>
11 #include <drivers/gic.h>
12 #include <drivers/pinctrl.h>
13 #include <drivers/stm32_bsec.h>
14 #include <drivers/stm32_etzpc.h>
15 #include <drivers/stm32_gpio.h>
16 #include <drivers/stm32_iwdg.h>
17 #include <drivers/stm32_uart.h>
18 #include <drivers/stm32mp_dt_bindings.h>
19 #ifdef CFG_STM32MP15
20 #include <drivers/stm32mp1_rcc.h>
21 #endif
22 #include <io.h>
23 #include <kernel/boot.h>
24 #include <kernel/dt.h>
25 #include <kernel/dt_driver.h>
26 #include <kernel/misc.h>
27 #include <kernel/panic.h>
28 #include <kernel/spinlock.h>
29 #include <kernel/tee_misc.h>
30 #include <libfdt.h>
31 #include <mm/core_memprot.h>
32 #include <platform_config.h>
33 #include <sm/psci.h>
34 #include <stm32_util.h>
35 #include <string.h>
36 #include <trace.h>
37
/* Non-secure mappings for the peripheral buses shared with normal world */
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB2_BASE, APB2_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB5_BASE, APB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB5_BASE, AHB5_SIZE);

/* Secure mappings for the peripheral buses accessed by OP-TEE drivers */
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB5_BASE, APB5_SIZE);
#ifdef CFG_STM32MP13
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB6_BASE, APB6_SIZE);
#endif
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB5_BASE, AHB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);

/* Register the DDR range OP-TEE may use for non-secure shared memory */
register_ddr(DDR_BASE, CFG_DRAM_SIZE);

/* Stringify a macro value: ID2STR(FOO) expands FOO then quotes the result */
#define _ID2STR(id)		(#id)
#define ID2STR(id)		_ID2STR(id)
61
platform_banner(void)62 static TEE_Result platform_banner(void)
63 {
64 IMSG("Platform stm32mp1: flavor %s - DT %s",
65 ID2STR(PLATFORM_FLAVOR),
66 ID2STR(CFG_EMBED_DTB_SOURCE_FILE));
67
68 return TEE_SUCCESS;
69 }
70 service_init(platform_banner);
71
72 /*
73 * Console
74 *
75 * CFG_STM32_EARLY_CONSOLE_UART specifies the ID of the UART used for
76 * trace console. Value 0 disables the early console.
77 *
 * We cannot use the generic serial_console support since probing
 * the console requires the platform clock driver to be already
 * up and ready, which is only the case once the service_init
 * levels are completed.
81 */
82 static struct stm32_uart_pdata console_data;
83
plat_console_init(void)84 void plat_console_init(void)
85 {
86 /* Early console initialization before MMU setup */
87 struct uart {
88 paddr_t pa;
89 } uarts[] = {
90 [0] = { .pa = 0 },
91 [1] = { .pa = USART1_BASE },
92 [2] = { .pa = USART2_BASE },
93 [3] = { .pa = USART3_BASE },
94 [4] = { .pa = UART4_BASE },
95 [5] = { .pa = UART5_BASE },
96 [6] = { .pa = USART6_BASE },
97 [7] = { .pa = UART7_BASE },
98 [8] = { .pa = UART8_BASE },
99 };
100
101 COMPILE_TIME_ASSERT(ARRAY_SIZE(uarts) > CFG_STM32_EARLY_CONSOLE_UART);
102
103 if (!uarts[CFG_STM32_EARLY_CONSOLE_UART].pa)
104 return;
105
106 /* No clock yet bound to the UART console */
107 console_data.clock = NULL;
108
109 stm32_uart_init(&console_data, uarts[CFG_STM32_EARLY_CONSOLE_UART].pa);
110
111 register_serial_console(&console_data.chip);
112
113 IMSG("Early console on UART#%u", CFG_STM32_EARLY_CONSOLE_UART);
114 }
115
init_console_from_dt(void)116 static TEE_Result init_console_from_dt(void)
117 {
118 struct stm32_uart_pdata *pd = NULL;
119 void *fdt = NULL;
120 int node = 0;
121 TEE_Result res = TEE_ERROR_GENERIC;
122
123 fdt = get_embedded_dt();
124 res = get_console_node_from_dt(fdt, &node, NULL, NULL);
125 if (res == TEE_ERROR_ITEM_NOT_FOUND) {
126 fdt = get_external_dt();
127 res = get_console_node_from_dt(fdt, &node, NULL, NULL);
128 if (res == TEE_ERROR_ITEM_NOT_FOUND)
129 return TEE_SUCCESS;
130 if (res != TEE_SUCCESS)
131 return res;
132 }
133
134 pd = stm32_uart_init_from_dt_node(fdt, node);
135 if (!pd) {
136 IMSG("DTB disables console");
137 register_serial_console(NULL);
138 return TEE_SUCCESS;
139 }
140
141 /* Replace early console with the new one */
142 console_flush();
143 console_data = *pd;
144 register_serial_console(&console_data.chip);
145 IMSG("DTB enables console");
146 free(pd);
147
148 return TEE_SUCCESS;
149 }
150
151 /* Probe console from DT once clock inits (service init level) are completed */
152 service_init_late(init_console_from_dt);
153
/*
 * GIC init, used also for primary/secondary boot core wake completion
 */
void boot_primary_init_intc(void)
{
	/* Initialize GIC CPU interface and distributor from the boot core */
	gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);

	/* Record this core as online (PSCI CPU state tracking) */
	stm32mp_register_online_cpu();
}

void boot_secondary_init_intc(void)
{
	/* Distributor is already up: only per-CPU GIC setup is needed here */
	gic_init_per_cpu();

	stm32mp_register_online_cpu();
}
170
#ifdef CFG_STM32MP15
/*
 * This concerns OP-TEE pager for STM32MP1 to use secure internal
 * RAMs to execute. TZSRAM refers the TZSRAM_BASE/TZSRAM_SIZE
 * used in boot.c to locate secure unpaged memory.
 *
 * STM32MP15 variants embed 640kB of contiguous securable SRAMs
 *
 * +--------------+ <-- SYSRAM_BASE
 * |              | lower part can be assigned to secure world
 * | SYSRAM 256kB | 4kB granule boundary
 * |              | upper part can be assigned to secure world
 * +--------------+ <-- SRAM1_BASE (= SYSRAM_BASE + SYSRAM_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM1 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM2_BASE (= SRAM1_BASE + SRAM1_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM2 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM3_BASE (= SRAM2_BASE + SRAM2_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM3 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE (= SRAM3_BASE + SRAM3_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM4 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE + SRAM4_SIZE
 *
 * If SRAMx memories are not used for the companion Cortex-M4
 * processor, OP-TEE can use this memory.
 *
 * SYSRAM configuration for secure/non-secure boundaries requires the
 * secure SYSRAM memory to start at the SYSRAM physical base address and grow
 * from there while the non-secure SYSRAM range lies at SYSRAM end addresses
 * with a 4KB page granule.
 *
 * SRAM1, SRAM2, SRAM3 and SRAM4 are independently assigned to secure world,
 * to non-secure world or possibly to Cortex-M4 exclusive access. Each
 * assignment covers the full related SRAMx memory range.
 *
 * Using non-secure SYSRAM or one of the SRAMx for SCMI message communication
 * can be done using CFG_STM32MP1_SCMI_SHM_BASE/CFG_STM32MP1_SCMI_SHM_SIZE.
 * This imposes related memory area is assigned to non-secure world.
 *
 * Using secure internal memories (SYSRAM and/or some SRAMx) with STM32MP15
 * shall meet these constraints, given that the TZSRAM physical memory range
 * shall be contiguous.
 */

/* End addresses of the internal memory ranges involved in the checks */
#define SYSRAM_END		(SYSRAM_BASE + SYSRAM_SIZE)
#define SYSRAM_SEC_END		(SYSRAM_BASE + SYSRAM_SEC_SIZE)
#define SRAMS_END		(SRAM4_BASE + SRAM4_SIZE)
#define SRAMS_START		SRAM1_BASE
#define TZSRAM_END		(CFG_TZSRAM_START + CFG_TZSRAM_SIZE)

/* TZSRAM entirely inside the secure part of SYSRAM */
#define TZSRAM_FITS_IN_SYSRAM_SEC ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
				   (TZSRAM_END <= SYSRAM_SEC_END))

/*
 * TZSRAM starts in SYSRAM (fully secure) and spills contiguously into
 * the SRAMx range
 */
#define TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
					 (CFG_TZSRAM_START < SYSRAM_END) && \
					 (TZSRAM_END > SYSRAM_END) && \
					 (TZSRAM_END <= SRAMS_END) && \
					 (SYSRAM_SIZE == SYSRAM_SEC_SIZE))

/* TZSRAM entirely inside the SRAM1..SRAM4 contiguous range */
#define TZSRAM_FITS_IN_SRAMS ((CFG_TZSRAM_START >= SRAMS_START) && \
			      (CFG_TZSRAM_START < SRAMS_END) && \
			      (TZSRAM_END <= SRAMS_END))

/* TZSRAM placed in DDR: debug/test configuration only */
#define TZSRAM_IS_IN_DRAM (CFG_TZSRAM_START >= CFG_DRAM_BASE)

#ifdef CFG_WITH_PAGER
/*
 * At build time, we enforce that, when pager is used,
 * either TZSRAM fully fits inside SYSRAM secure address range,
 * or TZSRAM fully fits inside the full SYSRAM and spread inside SRAMx orderly,
 * or TZSRAM fully fits some inside SRAMs address range,
 * or TZSRAM is in DDR for debug and test purpose.
 */
static_assert(TZSRAM_FITS_IN_SYSRAM_SEC || TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ||
	      TZSRAM_FITS_IN_SRAMS || TZSRAM_IS_IN_DRAM);
#endif /* CFG_WITH_PAGER */
#endif /* CFG_STM32MP15 */
251
secure_pager_ram(struct dt_driver_provider * fw_provider,unsigned int decprot_id,paddr_t base,size_t secure_size)252 static TEE_Result secure_pager_ram(struct dt_driver_provider *fw_provider,
253 unsigned int decprot_id,
254 paddr_t base, size_t secure_size)
255 {
256 /* Lock firewall configuration for secure internal RAMs used by pager */
257 uint32_t query_arg = DECPROT(decprot_id, DECPROT_S_RW, DECPROT_LOCK);
258 struct firewall_query fw_query = {
259 .ctrl = dt_driver_provider_priv_data(fw_provider),
260 .args = &query_arg,
261 .arg_count = 1,
262 };
263 TEE_Result res = TEE_ERROR_GENERIC;
264 bool is_pager_ram = false;
265
266 #if defined(CFG_WITH_PAGER)
267 is_pager_ram = core_is_buffer_intersect(CFG_TZSRAM_START,
268 CFG_TZSRAM_SIZE,
269 base, secure_size);
270 #endif
271 if (!is_pager_ram)
272 return TEE_SUCCESS;
273
274 res = firewall_set_memory_configuration(&fw_query, base, secure_size);
275 if (res)
276 EMSG("Failed to configure secure SRAM %#"PRIxPA"..%#"PRIxPA,
277 base, base + secure_size);
278
279 return res;
280 }
281
non_secure_scmi_ram(struct dt_driver_provider * fw_provider,unsigned int decprot_id,paddr_t base,size_t size)282 static TEE_Result non_secure_scmi_ram(struct dt_driver_provider *fw_provider,
283 unsigned int decprot_id,
284 paddr_t base, size_t size)
285 {
286 /* Do not lock firewall configuration for non-secure internal RAMs */
287 uint32_t query_arg = DECPROT(decprot_id, DECPROT_NS_RW, DECPROT_UNLOCK);
288 struct firewall_query fw_query = {
289 .ctrl = dt_driver_provider_priv_data(fw_provider),
290 .args = &query_arg,
291 .arg_count = 1,
292 };
293 TEE_Result res = TEE_ERROR_GENERIC;
294
295 if (!core_is_buffer_intersect(CFG_STM32MP1_SCMI_SHM_BASE,
296 CFG_STM32MP1_SCMI_SHM_SIZE,
297 base, size))
298 return TEE_SUCCESS;
299
300 res = firewall_set_memory_configuration(&fw_query, base, size);
301 if (res)
302 EMSG("Failed to configure non-secure SRAM %#"PRIxPA"..%#"PRIxPA,
303 base, base + size);
304
305 return res;
306 }
307
308 /* At run time we enforce that SRAM1 to SRAM4 are properly assigned if used */
configure_srams(struct dt_driver_provider * fw_provider)309 static void configure_srams(struct dt_driver_provider *fw_provider)
310 {
311 bool error = false;
312
313 if (IS_ENABLED(CFG_WITH_PAGER)) {
314 if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
315 SRAM1_BASE, SRAM1_SIZE))
316 error = true;
317
318 if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
319 SRAM2_BASE, SRAM2_SIZE))
320 error = true;
321
322 if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
323 SRAM3_BASE, SRAM3_SIZE))
324 error = true;
325
326 #if defined(CFG_STM32MP15)
327 if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
328 SRAM4_BASE, SRAM4_SIZE))
329 error = true;
330 #endif
331 }
332 if (CFG_STM32MP1_SCMI_SHM_BASE) {
333 if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
334 SRAM1_BASE, SRAM1_SIZE))
335 error = true;
336
337 if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
338 SRAM2_BASE, SRAM2_SIZE))
339 error = true;
340
341 if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
342 SRAM3_BASE, SRAM3_SIZE))
343 error = true;
344
345 #if defined(CFG_STM32MP15)
346 if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
347 SRAM4_BASE, SRAM4_SIZE))
348 error = true;
349 #endif
350 }
351
352 if (error)
353 panic();
354 }
355
configure_sysram(struct dt_driver_provider * fw_provider)356 static void configure_sysram(struct dt_driver_provider *fw_provider)
357 {
358 uint32_t query_arg = DECPROT(ETZPC_TZMA1_ID, DECPROT_S_RW,
359 DECPROT_UNLOCK);
360 struct firewall_query firewall = {
361 .ctrl = dt_driver_provider_priv_data(fw_provider),
362 .args = &query_arg,
363 .arg_count = 1,
364 };
365 TEE_Result res = TEE_ERROR_GENERIC;
366
367 res = firewall_set_memory_configuration(&firewall, SYSRAM_BASE,
368 SYSRAM_SEC_SIZE);
369 if (res)
370 panic("Unable to secure SYSRAM");
371
372 if (SYSRAM_SIZE > SYSRAM_SEC_SIZE) {
373 size_t nsec_size = SYSRAM_SIZE - SYSRAM_SEC_SIZE;
374 paddr_t nsec_start = SYSRAM_BASE + SYSRAM_SEC_SIZE;
375 uint8_t *va = phys_to_virt(nsec_start, MEM_AREA_IO_NSEC,
376 nsec_size);
377
378 IMSG("Non-secure SYSRAM [%p %p]", va, va + nsec_size - 1);
379
380 /* Clear content from the non-secure part */
381 memset(va, 0, nsec_size);
382 }
383 }
384
/*
 * Late driver init: harden internal RAM firewall configuration through
 * the ETZPC and, on STM32MP15, enforce that a Secure Closed device runs
 * with a secure RCC. Panics on any configuration failure.
 */
static TEE_Result init_late_stm32mp1_drivers(void)
{
	uint32_t __maybe_unused state = 0;

	/* Configure SYSRAM and SRAMx secure hardening */
	if (IS_ENABLED(CFG_STM32_ETZPC)) {
		struct dt_driver_provider *prov = NULL;
		int node = 0;

		/* The ETZPC firewall driver must have probed by now */
		node = fdt_node_offset_by_compatible(get_embedded_dt(), -1,
						     "st,stm32-etzpc");
		if (node < 0)
			panic("Could not get ETZPC node");

		prov = dt_driver_get_provider_by_node(node, DT_DRIVER_FIREWALL);
		assert(prov);

		configure_sysram(prov);
		configure_srams(prov);
	}

#ifdef CFG_STM32MP15
	/* Device in Secure Closed state require RCC secure hardening */
	if (stm32_bsec_get_state(&state))
		panic();
	if (state == BSEC_STATE_SEC_CLOSED && !stm32_rcc_is_secure())
		panic("Closed device mandates secure RCC");
#endif

	return TEE_SUCCESS;
}

driver_init_late(init_late_stm32mp1_drivers);
418
stm32_rcc_base(void)419 vaddr_t stm32_rcc_base(void)
420 {
421 static struct io_pa_va base = { .pa = RCC_BASE };
422
423 return io_pa_or_va_secure(&base, 1);
424 }
425
get_gicd_base(void)426 vaddr_t get_gicd_base(void)
427 {
428 struct io_pa_va base = { .pa = GIC_BASE + GICD_OFFSET };
429
430 return io_pa_or_va_secure(&base, 1);
431 }
432
stm32mp_get_bsec_static_cfg(struct stm32_bsec_static_cfg * cfg)433 void stm32mp_get_bsec_static_cfg(struct stm32_bsec_static_cfg *cfg)
434 {
435 cfg->base = BSEC_BASE;
436 cfg->upper_start = STM32MP1_UPPER_OTP_START;
437 cfg->max_id = STM32MP1_OTP_MAX_ID;
438 }
439
/*
 * Weak default: report that no PMIC is present. Platforms embedding a
 * PMIC driver override this symbol.
 */
bool __weak stm32mp_with_pmic(void)
{
	return false;
}
444
/*
 * Take the spinlock and mask exceptions, but only once the MMU is up
 * and a lock was supplied; returns the exception state to restore with
 * may_spin_unlock() (0 when no lock was taken).
 */
uint32_t may_spin_lock(unsigned int *lock)
{
	if (lock && cpu_mmu_enabled())
		return cpu_spin_lock_xsave(lock);

	return 0;
}
452
/*
 * Counterpart of may_spin_lock(): release the lock and restore the
 * exception state, only when the MMU is up and a lock was supplied.
 */
void may_spin_unlock(unsigned int *lock, uint32_t exceptions)
{
	if (lock && cpu_mmu_enabled())
		cpu_spin_unlock_xrestore(lock, exceptions);
}
460
stm32_tamp_base(void)461 static vaddr_t stm32_tamp_base(void)
462 {
463 static struct io_pa_va base = { .pa = TAMP_BASE };
464
465 return io_pa_or_va_secure(&base, 1);
466 }
467
bkpreg_base(void)468 static vaddr_t bkpreg_base(void)
469 {
470 return stm32_tamp_base() + TAMP_BKP_REGISTER_OFF;
471 }
472
stm32mp_bkpreg(unsigned int idx)473 vaddr_t stm32mp_bkpreg(unsigned int idx)
474 {
475 return bkpreg_base() + (idx * sizeof(uint32_t));
476 }
477
bank_is_valid(unsigned int bank)478 static bool __maybe_unused bank_is_valid(unsigned int bank)
479 {
480 if (IS_ENABLED(CFG_STM32MP15))
481 return bank == GPIO_BANK_Z || bank <= GPIO_BANK_K;
482
483 if (IS_ENABLED(CFG_STM32MP13))
484 return bank <= GPIO_BANK_I;
485
486 panic();
487 }
488
#ifdef CFG_STM32_IWDG
/*
 * Read the IWDG hardware configuration fuses from the BSEC "hw2_otp"
 * word for the watchdog instance at @pbase, and report whether the
 * watchdog is hardware-enabled and frozen in stop/standby modes.
 * Panics on unknown instance or OTP access failure.
 */
TEE_Result stm32_get_iwdg_otp_config(paddr_t pbase,
				     struct stm32_iwdg_otp_data *otp_data)
{
	unsigned int instance = 0;
	uint32_t otp_id = 0;
	uint8_t bit_offset = 0;
	size_t bit_len = 0;
	uint32_t hw2_otp = 0;

	if (pbase == IWDG1_BASE)
		instance = 0;
	else if (pbase == IWDG2_BASE)
		instance = 1;
	else
		panic();

	/* hw2_otp must be a full aligned 32-bit OTP word */
	if (stm32_bsec_find_otp_in_nvmem_layout("hw2_otp", &otp_id,
						&bit_offset, &bit_len) ||
	    bit_len != 32 || bit_offset != 0)
		panic();

	if (stm32_bsec_read_otp(&hw2_otp, otp_id))
		panic();

	otp_data->hw_enabled = hw2_otp &
			       BIT(instance + HW2_OTP_IWDG_HW_ENABLE_SHIFT);
	otp_data->disable_on_stop = hw2_otp &
				    BIT(instance + HW2_OTP_IWDG_FZ_STOP_SHIFT);
	otp_data->disable_on_standby = hw2_otp &
				       BIT(instance +
					   HW2_OTP_IWDG_FZ_STANDBY_SHIFT);

	return TEE_SUCCESS;
}
#endif /*CFG_STM32_IWDG*/
528
#ifdef CFG_STM32_DEBUG_ACCESS
/*
 * Open debug access on non-closed devices: when the BSEC debug
 * configuration is non-zero and the device is not in Secure Closed
 * state, enable all debug accesses and the DBG clock.
 */
static TEE_Result init_debug(void)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t conf = stm32_bsec_read_debug_conf();
	struct clk *dbg_clk = stm32mp_rcc_clock_id_to_clk(CK_DBG);
	uint32_t state = 0;

	res = stm32_bsec_get_state(&state);
	if (res)
		return res;

	/* Never widen debug access on a Secure Closed device */
	if (state != BSEC_STATE_SEC_CLOSED && conf) {
		if (IS_ENABLED(CFG_INSECURE))
			IMSG("WARNING: All debug accesses are allowed");

		res = stm32_bsec_write_debug_conf(conf | BSEC_DEBUG_ALL);
		if (res)
			return res;

		/*
		 * Enable DBG clock as used to access coprocessor
		 * debug registers
		 */
		clk_enable(dbg_clk);
	}

	return TEE_SUCCESS;
}
early_init_late(init_debug);
#endif /* CFG_STM32_DEBUG_ACCESS */
560
/*
 * Some generic resources need to be unpaged: keep pinctrl_apply_state()
 * resident so it stays callable from unpaged context.
 */
DECLARE_KEEP_PAGER(pinctrl_apply_state);
563
stm32mp_allow_probe_shared_device(const void * fdt,int node)564 bool stm32mp_allow_probe_shared_device(const void *fdt, int node)
565 {
566 static int uart_console_node = -1;
567 const char *compat = NULL;
568 static bool once;
569
570 if (IS_ENABLED(CFG_STM32_ALLOW_UNSAFE_PROBE))
571 return true;
572
573 if (!once) {
574 get_console_node_from_dt((void *)fdt, &uart_console_node,
575 NULL, NULL);
576 once = true;
577 }
578
579 compat = fdt_stringlist_get(fdt, node, "compatible", 0, NULL);
580
581 /*
582 * Allow OP-TEE console and MP15 I2C and RNG to be shared
583 * with non-secure world.
584 */
585 if (node == uart_console_node ||
586 !strcmp(compat, "st,stm32mp15-i2c-non-secure") ||
587 (!strcmp(compat, "st,stm32-rng") &&
588 IS_ENABLED(CFG_WITH_SOFTWARE_PRNG)))
589 return true;
590
591 return false;
592 }
593
#if defined(CFG_STM32MP15)
/*
 * Convert a SRAM1/2/3/4 alias physical address into the physical
 * address OP-TEE uses for that memory. Addresses outside the alias
 * ranges are returned unchanged.
 */
paddr_t stm32mp1_pa_or_sram_alias_pa(paddr_t pa)
{
	/*
	 * OP-TEE uses the alias physical addresses of SRAM1/2/3/4,
	 * not the standard physical addresses. This choice was initially
	 * driven by pager that needs physically contiguous memories
	 * for internal secure memories.
	 */
	static const struct {
		paddr_t alias;
		paddr_t base;
		size_t size;
	} sram_map[] = {
		{ SRAM1_ALT_BASE, SRAM1_BASE, SRAM1_SIZE },
		{ SRAM2_ALT_BASE, SRAM2_BASE, SRAM2_SIZE },
		{ SRAM3_ALT_BASE, SRAM3_BASE, SRAM3_SIZE },
		{ SRAM4_ALT_BASE, SRAM4_BASE, SRAM4_SIZE },
	};
	size_t i = 0;

	for (i = 0; i < ARRAY_SIZE(sram_map); i++)
		if (core_is_buffer_inside(pa, 1, sram_map[i].alias,
					  sram_map[i].size))
			return pa + sram_map[i].base - sram_map[i].alias;

	return pa;
}

/* Tell whether @base..@base+@size overlaps the pager memory (TZSRAM) */
bool stm32mp1_ram_intersect_pager_ram(paddr_t base, size_t size)
{
	paddr_t start = stm32mp1_pa_or_sram_alias_pa(base);

	return core_is_buffer_intersect(start, size, CFG_TZSRAM_START,
					CFG_TZSRAM_SIZE);
}
#endif
623