// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 NXP
 */

#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/armv8/mmu.h>
#include <asm/mach-imx/boot_mode.h>
#include <asm/global_data.h>
#include <efi_loader.h>
#include <event.h>
#include <spl.h>
#include <asm/arch/rdc.h>
#include <asm/mach-imx/s400_api.h>
#include <asm/mach-imx/mu_hal.h>
#include <cpu_func.h>
#include <asm/setup.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <dm/device.h>
#include <dm/uclass-internal.h>
#include <fuse.h>
#include <thermal.h>
#include <linux/iopoll.h>
#include <env.h>
#include <env_internal.h>

DECLARE_GLOBAL_DATA_PTR;

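/*
 * The boot ROM exposes its API through a table at a fixed address;
 * on the i.MX8ULP that table is expected at 0x1980.
 */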
struct rom_api *g_rom_api = (struct rom_api *)0x1980;

bool is_usb_boot(void)
{
	return get_boot_device() == USB_BOOT;
}

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return devno;
}

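/*
 * Pick the MMC device that holds the environment: ask the ROM API which
 * device we booted from and, for SD/eMMC boots, map that instance through
 * board_mmc_get_env_dev(); otherwise fall back to "mmcdev" or the
 * configured default.
 */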
int mmc_get_env_dev(void)
{
	int ret;
	u32 boot;
	u16 boot_type;
	u8 boot_instance;

	ret = rom_api_query_boot_infor(QUERY_BT_DEV, &boot);

	if (ret != ROM_API_OKAY) {
		puts("ROMAPI: failure at query_boot_info\n");
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	boot_type = boot >> 16;
	boot_instance = (boot >> 8) & 0xff;

	/* If not booting from SD/MMC, use the default value */
	if (boot_type != BOOT_TYPE_SD && boot_type != BOOT_TYPE_MMC)
		return env_get_ulong("mmcdev", 10, CONFIG_SYS_MMC_ENV_DEV);

	return board_mmc_get_env_dev(boot_instance);
}
#endif

static void set_cpu_info(struct sentinel_get_info_data *info)
{
	gd->arch.soc_rev = info->soc;
	gd->arch.lifecycle = info->lc;
	memcpy((void *)&gd->arch.uid, &info->uid, 4 * sizeof(u32));
}

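/*
 * gd->arch.soc_rev holds the Sentinel-reported SoC value; its top byte
 * starts at 0xa0 for A0 silicon, so convert it into the generic MXC
 * cpu_rev encoding used by is_soc_rev() and friends.
 */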
u32 get_cpu_rev(void)
{
	u32 rev = (gd->arch.soc_rev >> 24) - 0xa0;

	return (MXC_CPU_IMX8ULP << 12) | (CHIP_REV_1_0 + rev);
}

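/*
 * The LPBOOT and DUALBOOT bits of the boot configuration word in the
 * secure SIM block select between single boot, dual boot and low power
 * boot.
 */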
enum bt_mode get_boot_mode(void)
{
	u32 bt0_cfg = 0;

	bt0_cfg = readl(SIM_SEC_BASE_ADDR + 0x24);
	bt0_cfg &= (BT0CFG_LPBOOT_MASK | BT0CFG_DUALBOOT_MASK);

	if (!(bt0_cfg & BT0CFG_LPBOOT_MASK)) {
		/* No low power boot */
		if (bt0_cfg & BT0CFG_DUALBOOT_MASK)
			return DUAL_BOOT;
		else
			return SINGLE_BOOT;
	}

	return LOW_POWER_BOOT;
}

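/*
 * Whether an M33 image was started by the ROM. In SPL this is read from
 * the SIM_SEC DGO_GP6 register; in U-Boot proper SPL has already passed
 * the result through SIM1 GPR0 (see spl_pass_boot_info()).
 */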
bool m33_image_booted(void)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
		u32 gp6 = 0;

		/* DGO_GP6 */
		gp6 = readl(SIM_SEC_BASE_ADDR + 0x28);
		if (gp6 & BIT(5))
			return true;

		return false;
	} else {
		u32 gpr0 = readl(SIM1_BASE_ADDR);
		if (gpr0 & BIT(0))
			return true;

		return false;
	}
}

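/*
 * Whether the ROM left the resource domain controller (XRDC/TRDC)
 * enabled. In SPL this is derived from the DBD_EN fuse word; in U-Boot
 * proper it is taken from the flag SPL stored in SIM1 GPR0.
 */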
bool rdc_enabled_in_boot(void)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
		u32 val = 0;
		int ret;
		bool rdc_en = true; /* Default: assume DBD_EN is set */

		/* Read the DBD_EN fuse */
		ret = fuse_read(8, 1, &val);
		if (!ret)
			rdc_en = !!(val & 0x200); /* only the A1 part uses DBD_EN, so check DBD_EN at its new location */

		return rdc_en;
	} else {
		u32 gpr0 = readl(SIM1_BASE_ADDR);
		if (gpr0 & 0x2)
			return true;

		return false;
	}
}

static void spl_pass_boot_info(void)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
		bool m33_booted = m33_image_booted();
		bool rdc_en = rdc_enabled_in_boot();
		u32 val = 0;

		if (m33_booted)
			val |= 0x1;

		if (rdc_en)
			val |= 0x2;

		writel(val, SIM1_BASE_ADDR);
	}
}

bool is_m33_handshake_necessary(void)
{
	/* The handshake is only needed in U-Boot proper */
	if (!IS_ENABLED(CONFIG_SPL_BUILD))
		return (m33_image_booted() || rdc_enabled_in_boot());
	else
		return false;
}

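/*
 * Handshake with the M33 over MU0: raise our F0 flag on the MUB side,
 * then poll up to timeout_ms for the M33 to raise F0 on the MUA side
 * before clearing our flag again. Returns 0 on success or the negative
 * error from readl_poll_sleep_timeout() on timeout.
 */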
int m33_image_handshake(ulong timeout_ms)
{
	u32 fsr;
	int ret;
	ulong timeout_us = timeout_ms * 1000;

	/* Notify the M33 that we are ready for it to init SRTM (enable the MU receive interrupt and so on) */
	setbits_le32(MU0_B_BASE_ADDR + 0x100, BIT(0)); /* set FCR F0 flag of MU0_MUB */

	/*
	 * Wait for the M33 to set the FCR F0 flag of MU0_MUA.
	 * Clear the FCR F0 flag of MU0_MUB once the M33 has set the FCR F0 flag of MU0_MUA.
	 */
	ret = readl_poll_sleep_timeout(MU0_B_BASE_ADDR + 0x104, fsr, fsr & BIT(0), 10, timeout_us);
	if (!ret)
		clrbits_le32(MU0_B_BASE_ADDR + 0x100, BIT(0));

	return ret;
}

#define CMC_SRS_TAMPER                    BIT(31)
#define CMC_SRS_SECURITY                  BIT(30)
#define CMC_SRS_TZWDG                     BIT(29)
#define CMC_SRS_JTAG_RST                  BIT(28)
#define CMC_SRS_CORE1                     BIT(16)
#define CMC_SRS_LOCKUP                    BIT(15)
#define CMC_SRS_SW                        BIT(14)
#define CMC_SRS_WDG                       BIT(13)
#define CMC_SRS_PIN_RESET                 BIT(8)
#define CMC_SRS_WARM                      BIT(4)
#define CMC_SRS_HVD                       BIT(3)
#define CMC_SRS_LVD                       BIT(2)
#define CMC_SRS_POR                       BIT(1)
#define CMC_SRS_WUP                       BIT(0)

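/*
 * Decode the CMC1 System Reset Status register into a short string such
 * as "POR", "WUP" or "WARM-WDG". The caller provides the buffer;
 * print_cpuinfo() passes an 18-byte stack buffer.
 */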
static char *get_reset_cause(char *ret)
{
	u32 cause1, cause = 0, srs = 0;
	void __iomem *reg_ssrs = (void __iomem *)(CMC1_BASE_ADDR + 0x88);
	void __iomem *reg_srs = (void __iomem *)(CMC1_BASE_ADDR + 0x80);

	if (!ret)
		return "null";

	srs = readl(reg_srs);
	cause1 = readl(reg_ssrs);

	cause = srs & (CMC_SRS_POR | CMC_SRS_WUP | CMC_SRS_WARM);

	switch (cause) {
	case CMC_SRS_POR:
		sprintf(ret, "%s", "POR");
		break;
	case CMC_SRS_WUP:
		sprintf(ret, "%s", "WUP");
		break;
	case CMC_SRS_WARM:
		cause = srs & (CMC_SRS_WDG | CMC_SRS_SW |
			CMC_SRS_JTAG_RST);
		switch (cause) {
		case CMC_SRS_WDG:
			sprintf(ret, "%s", "WARM-WDG");
			break;
		case CMC_SRS_SW:
			sprintf(ret, "%s", "WARM-SW");
			break;
		case CMC_SRS_JTAG_RST:
			sprintf(ret, "%s", "WARM-JTAG");
			break;
		default:
			sprintf(ret, "%s", "WARM-UNKN");
			break;
		}
		break;
	default:
		sprintf(ret, "%s-%X", "UNKN", srs);
		break;
	}

	debug("[%X] SRS[%X] %X - ", cause1, srs, srs ^ cause1);
	return ret;
}

#if defined(CONFIG_DISPLAY_CPUINFO)
const char *get_imx_type(u32 imxtype)
{
	return "8ULP";
}

int print_cpuinfo(void)
{
	u32 cpurev;
	char cause[18];

	cpurev = get_cpu_rev();

	printf("CPU:   i.MX%s rev%d.%d at %d MHz\n",
	       get_imx_type((cpurev & 0xFF000) >> 12),
	       (cpurev & 0x000F0) >> 4, (cpurev & 0x0000F) >> 0,
	       mxc_get_clock(MXC_ARM_CLK) / 1000000);

#if defined(CONFIG_IMX_PMC_TEMPERATURE)
	struct udevice *udev;
	int ret, temp;

	ret = uclass_get_device(UCLASS_THERMAL, 0, &udev);
	if (!ret) {
		ret = thermal_get_temp(udev, &temp);
		if (!ret)
			printf("CPU current temperature: %d\n", temp);
		else
			debug(" - failed to get CPU current temperature\n");
	} else {
		debug(" - failed to get CPU current temperature\n");
	}
#endif

	printf("Reset cause: %s\n", get_reset_cause(cause));

	printf("Boot mode: ");
	switch (get_boot_mode()) {
	case LOW_POWER_BOOT:
		printf("Low power boot\n");
		break;
	case DUAL_BOOT:
		printf("Dual boot\n");
		break;
	case SINGLE_BOOT:
	default:
		printf("Single boot\n");
		break;
	}

	return 0;
}
#endif

#define UNLOCK_WORD0 0xC520 /* 1st unlock word */
#define UNLOCK_WORD1 0xD928 /* 2nd unlock word */
#define REFRESH_WORD0 0xA602 /* 1st refresh word */
#define REFRESH_WORD1 0xB480 /* 2nd refresh word */

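/*
 * Disable a WDOG32 instance if the ROM left it enabled: refresh the
 * counter, unlock the CS register when it is still locked, then clear
 * the window, restore the default timeout and disable the watchdog,
 * waiting for the reconfiguration to be acknowledged.
 */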
static void disable_wdog(void __iomem *wdog_base)
{
	u32 val_cs = readl(wdog_base + 0x00);

	if (!(val_cs & 0x80))
		return;

	dmb();
	__raw_writel(REFRESH_WORD0, (wdog_base + 0x04)); /* Refresh the CNT */
	__raw_writel(REFRESH_WORD1, (wdog_base + 0x04));
	dmb();

	if (!(val_cs & 0x800)) {
		dmb();
		__raw_writel(UNLOCK_WORD0, (wdog_base + 0x04));
		__raw_writel(UNLOCK_WORD1, (wdog_base + 0x04));
		dmb();

		while (!(readl(wdog_base + 0x00) & 0x800))
			;
	}
	writel(0x0, (wdog_base + 0x0C)); /* Set WIN to 0 */
	writel(0x400, (wdog_base + 0x08)); /* Set timeout to default 0x400 */
	writel(0x120, (wdog_base + 0x00)); /* Disable it and set update */

	while (!(readl(wdog_base + 0x00) & 0x400))
		;
}

void init_wdog(void)
{
	disable_wdog((void __iomem *)WDG3_RBASE);
}

static struct mm_region imx8ulp_arm64_mem_map[] = {
	{
		/* ROM */
		.virt = 0x0,
		.phys = 0x0,
		.size = 0x40000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_OUTER_SHARE
	},
	{
		/* FLEXSPI0 */
		.virt = 0x04000000,
		.phys = 0x04000000,
		.size = 0x08000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{
		/* SSRAM (align with 2M) */
		.virt = 0x1FE00000UL,
		.phys = 0x1FE00000UL,
		.size = 0x400000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_OUTER_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* SRAM1 (align with 2M) */
		.virt = 0x21000000UL,
		.phys = 0x21000000UL,
		.size = 0x200000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_OUTER_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* SRAM0 (align with 2M) */
		.virt = 0x22000000UL,
		.phys = 0x22000000UL,
		.size = 0x200000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_OUTER_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* Peripherals */
		.virt = 0x27000000UL,
		.phys = 0x27000000UL,
		.size = 0x3000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* Peripherals */
		.virt = 0x2D000000UL,
		.phys = 0x2D000000UL,
		.size = 0x1600000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* FLEXSPI1-2 */
		.virt = 0x40000000UL,
		.phys = 0x40000000UL,
		.size = 0x40000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* DRAM1 */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = PHYS_SDRAM_SIZE,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_OUTER_SHARE
	}, {
		/*
		 * empty entry used to split table entry 5
		 * if needed when a TEE is used
		 */
		0,
	}, {
		/* List terminator */
		0,
	}
};

struct mm_region *mem_map = imx8ulp_arm64_mem_map;

static unsigned int imx8ulp_find_dram_entry_in_mem_map(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(imx8ulp_arm64_mem_map); i++)
		if (imx8ulp_arm64_mem_map[i].phys == CFG_SYS_SDRAM_BASE)
			return i;

	hang();	/* Entry not found, this must never happen. */
}

/* simplify the page table size to enhance boot speed */
#define MAX_PTE_ENTRIES		512
#define MAX_MEM_MAP_REGIONS	16
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}

void enable_caches(void)
{
	/* If OP-TEE runs, remove the OP-TEE memory from the MMU table to avoid speculative prefetch */
	if (rom_pointer[1]) {
		/*
		 * A TEE is loaded, so the ddr bank structures
		 * have been modified; update the mmu table accordingly
		 */
		int i = 0;
		int entry = imx8ulp_find_dram_entry_in_mem_map();
		u64 attrs = imx8ulp_arm64_mem_map[entry].attrs;

		while (i < CONFIG_NR_DRAM_BANKS &&
		       entry < ARRAY_SIZE(imx8ulp_arm64_mem_map)) {
			if (gd->bd->bi_dram[i].start == 0)
				break;
			imx8ulp_arm64_mem_map[entry].phys = gd->bd->bi_dram[i].start;
			imx8ulp_arm64_mem_map[entry].virt = gd->bd->bi_dram[i].start;
			imx8ulp_arm64_mem_map[entry].size = gd->bd->bi_dram[i].size;
			imx8ulp_arm64_mem_map[entry].attrs = attrs;
			debug("Added memory mapping (%d): %llx %llx\n", entry,
			      imx8ulp_arm64_mem_map[entry].phys, imx8ulp_arm64_mem_map[entry].size);
			i++; entry++;
		}
	}

	icache_enable();
	dcache_enable();
}

__weak int board_phys_sdram_size(phys_size_t *size)
{
	if (!size)
		return -EINVAL;

	*size = PHYS_SDRAM_SIZE;
	return 0;
}

int dram_init(void)
{
	unsigned int entry = imx8ulp_find_dram_entry_in_mem_map();
	phys_size_t sdram_size;
	int ret;

	ret = board_phys_sdram_size(&sdram_size);
	if (ret)
		return ret;

	/* rom_pointer[1] contains the size the TEE occupies */
	if (rom_pointer[1])
		gd->ram_size = sdram_size - rom_pointer[1];
	else
		gd->ram_size = sdram_size;

	/* also update the SDRAM size in the mem_map used externally */
	imx8ulp_arm64_mem_map[entry].size = sdram_size;
	return 0;
}

int dram_init_banksize(void)
{
	int bank = 0;
	int ret;
	phys_size_t sdram_size;

	ret = board_phys_sdram_size(&sdram_size);
	if (ret)
		return ret;

	gd->bd->bi_dram[bank].start = PHYS_SDRAM;
	if (rom_pointer[1]) {
		phys_addr_t optee_start = (phys_addr_t)rom_pointer[0];
		phys_size_t optee_size = (size_t)rom_pointer[1];

		gd->bd->bi_dram[bank].size = optee_start - gd->bd->bi_dram[bank].start;
		if ((optee_start + optee_size) < (PHYS_SDRAM + sdram_size)) {
			if (++bank >= CONFIG_NR_DRAM_BANKS) {
				puts("CONFIG_NR_DRAM_BANKS is not enough\n");
				return -1;
			}

			gd->bd->bi_dram[bank].start = optee_start + optee_size;
			gd->bd->bi_dram[bank].size = PHYS_SDRAM +
				sdram_size - gd->bd->bi_dram[bank].start;
		}
	} else {
		gd->bd->bi_dram[bank].size = sdram_size;
	}

	return 0;
}

phys_size_t get_effective_memsize(void)
{
	/* return the first bank as effective memory */
	if (rom_pointer[1])
		return ((phys_addr_t)rom_pointer[0] - PHYS_SDRAM);

	return gd->ram_size;
}

#ifdef CONFIG_ENV_VARS_UBOOT_RUNTIME_CONFIG
void get_board_serial(struct tag_serialnr *serialnr)
{
	u32 uid[4] = {0};
	u32 res;
	int ret;

	ret = ahab_read_common_fuse(1, uid, 4, &res);
	if (ret)
		printf("ahab read fuse failed %d, 0x%x\n", ret, res);
	else
		printf("UID 0x%x,0x%x,0x%x,0x%x\n", uid[0], uid[1], uid[2], uid[3]);

	serialnr->low = uid[0];
	serialnr->high = uid[3];
}
#endif

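/*
 * Program the core0 reset vector base via SIM1 DGO8, then use the
 * update/ack bits in the SIM1 GPR to latch the new value.
 */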
static void set_core0_reset_vector(u32 entry)
{
	/* Update SIM1 DGO8 for the reset vector base */
	writel(entry, SIM1_BASE_ADDR + 0x5c);

	/* set the update bit */
	setbits_le32(SIM1_BASE_ADDR + 0x8, 0x1 << 24);

	/* poll for the ack */
	while ((readl(SIM1_BASE_ADDR + 0x8) & (0x1 << 26)) == 0)
		;

	/* clear the update bit */
	clrbits_le32(SIM1_BASE_ADDR + 0x8, (0x1 << 24));

	/* clear the ack by writing 1 */
	setbits_le32(SIM1_BASE_ADDR + 0x8, (0x1 << 26));
}

/* Not used for now */
int trdc_set_access(void)
{
	/*
	 * TRDC mgr + 4 MBC + 2 MRC.
	 */
	trdc_mbc_set_access(2, 7, 0, 49, true);
	trdc_mbc_set_access(2, 7, 0, 50, true);
	trdc_mbc_set_access(2, 7, 0, 51, true);
	trdc_mbc_set_access(2, 7, 0, 52, true);
	trdc_mbc_set_access(2, 7, 0, 53, true);
	trdc_mbc_set_access(2, 7, 0, 54, true);

	/* 0x1fff8000 used for resource table by remoteproc */
	trdc_mbc_set_access(0, 7, 2, 31, false);

	/* CGC0: PBridge0 slot 47 and PCC0 slot 48 */
	trdc_mbc_set_access(2, 7, 0, 47, false);
	trdc_mbc_set_access(2, 7, 0, 48, false);

	/* PCC1 */
	trdc_mbc_set_access(2, 7, 1, 17, false);
	trdc_mbc_set_access(2, 7, 1, 34, false);

	/* Iomuxc0: PBridge1 slot 33 */
	trdc_mbc_set_access(2, 7, 1, 33, false);

	/* flexspi0 */
	trdc_mbc_set_access(2, 7, 0, 57, false);
	trdc_mrc_region_set_access(0, 7, 0x04000000, 0x0c000000, false);

	/* tpm0: PBridge1 slot 21 */
	trdc_mbc_set_access(2, 7, 1, 21, false);
	/* lpi2c0: PBridge1 slot 24 */
	trdc_mbc_set_access(2, 7, 1, 24, false);

	/* Allow the M33 to access the TRDC MGR */
	trdc_mbc_set_access(2, 6, 0, 49, true);
	trdc_mbc_set_access(2, 6, 0, 50, true);
	trdc_mbc_set_access(2, 6, 0, 51, true);
	trdc_mbc_set_access(2, 6, 0, 52, true);
	trdc_mbc_set_access(2, 6, 0, 53, true);
	trdc_mbc_set_access(2, 6, 0, 54, true);

	/* Set SAI0 for eDMA 0, NS */
	trdc_mbc_set_access(2, 0, 1, 28, false);

	/* Set SSRAM for eDMA0 access */
	trdc_mbc_set_access(0, 0, 2, 0, false);
	trdc_mbc_set_access(0, 0, 2, 1, false);
	trdc_mbc_set_access(0, 0, 2, 2, false);
	trdc_mbc_set_access(0, 0, 2, 3, false);
	trdc_mbc_set_access(0, 0, 2, 4, false);
	trdc_mbc_set_access(0, 0, 2, 5, false);
	trdc_mbc_set_access(0, 0, 2, 6, false);
	trdc_mbc_set_access(0, 0, 2, 7, false);

	writel(0x800000a0, 0x28031840);

	return 0;
}

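/*
 * Ownership of the LPAV (Low Power Audio Video) domain: unless it is to
 * stay with the M33/RTD side, hand the LPAV master to the application
 * domain (APD), and always route the listed LPAV peripherals plus the
 * LPAV slave/DMA2 channel and request allocations to the APD.
 */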
void lpav_configure(bool lpav_to_m33)
{
	if (!lpav_to_m33)
		setbits_le32(SIM_SEC_BASE_ADDR + 0x44, BIT(7)); /* LPAV to APD */

	/* PXP/GPU 2D/3D/DCNANO/MIPI_DSI/EPDC/HIFI4 to APD */
	setbits_le32(SIM_SEC_BASE_ADDR + 0x4c, 0x7F);

	/* LPAV slave/dma2 ch allocation and request allocation to APD */
	writel(0x1f, SIM_SEC_BASE_ADDR + 0x50);
	writel(0xffffffff, SIM_SEC_BASE_ADDR + 0x54);
	writel(0x003fffff, SIM_SEC_BASE_ADDR + 0x58);
}

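/*
 * Copy the LPOSC setting from fuse bank 25 into the low byte of the
 * LPOSCCTRL register; presumably a factory trim value. Nothing is
 * written if either fuse word cannot be read.
 */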
void load_lposc_fuse(void)
{
	int ret;
	u32 val = 0, val2 = 0, reg;

	ret = fuse_read(25, 0, &val);
	if (ret)
		return; /* failed */

	ret = fuse_read(25, 1, &val2);
	if (ret)
		return; /* failed */

	/* LPOSCCTRL */
	reg = readl(0x2802f304);
	reg &= ~0xff;
	reg |= (val & 0xff);
	writel(reg, 0x2802f304);
}

void set_lpav_qos(void)
{
	/* Set read QoS of dcnano on LPAV NIC */
	writel(0xf, 0x2e447100);
}

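/*
 * Early SoC init. In SPL: clear any pending WDOG_AD reset status, disable
 * the watchdog, hand the LPAV domain to the right owner for the selected
 * boot mode, release the XRDC if the ROM enabled it, set up early clocks
 * and record the boot flags for U-Boot proper. In U-Boot proper: point
 * core0's reset vector back at the ROM and, if required, handshake with
 * the M33 before the console is available.
 */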
int arch_cpu_init(void)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
		/* Enable the System Reset Interrupt using WDOG_AD */
		setbits_le32(CMC1_BASE_ADDR + 0x8C, BIT(13));
		/* Clear the AD_PERIPH power switch domain out-of-reset interrupt flag */
		setbits_le32(CMC1_BASE_ADDR + 0x70, BIT(4));

		if (readl(CMC1_BASE_ADDR + 0x90) & BIT(13)) {
			/* Clear the System Reset Interrupt Flag Register of WDOG_AD */
			setbits_le32(CMC1_BASE_ADDR + 0x90, BIT(13));
			/* Reset the WDOG to clear the reset request */
			pcc_reset_peripheral(3, WDOG3_PCC3_SLOT, true);
			pcc_reset_peripheral(3, WDOG3_PCC3_SLOT, false);
		}

		/* Disable the wdog */
		init_wdog();

		if (get_boot_mode() == SINGLE_BOOT)
			lpav_configure(false);
		else
			lpav_configure(true);

		/* Release the xrdc, then allow the A35 to write SRAM2 */
		if (rdc_enabled_in_boot())
			release_rdc(RDC_XRDC);

		xrdc_mrc_region_set_access(2, CONFIG_SPL_TEXT_BASE, 0xE00);

		clock_init_early();

		spl_pass_boot_info();
	} else {
		int ret;

		/* reconfigure the core0 reset vector to the ROM */
		set_core0_reset_vector(0x1000);

		if (is_m33_handshake_necessary()) {
			/* Start the handshake with the M33 to ensure its TRDC configuration has completed */
			ret = m33_image_handshake(1000);
			if (!ret)
				gd->arch.m33_handshake_done = true;
			else /* Skip it and fall through; checkcpu() will hang once the console is ready */
				gd->arch.m33_handshake_done = false;
		}
	}

	return 0;
}

int checkcpu(void)
{
	if (is_m33_handshake_necessary()) {
		if (!gd->arch.m33_handshake_done) {
			puts("M33 Sync: Timeout, Boot Stop!\n");
			hang();
		} else {
			puts("M33 Sync: OK\n");
		}
	}
	return 0;
}

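/*
 * After the DM is ready, probe the S400/Sentinel MU and ask the Sentinel
 * for SoC info (revision, lifecycle, UID). If the query fails, fall back
 * to a hard-coded A0.1 revision so get_cpu_rev() still returns something
 * sensible.
 */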
int imx8ulp_dm_post_init(void)
{
	struct udevice *devp;
	int ret;
	u32 res;
	struct sentinel_get_info_data *info = (struct sentinel_get_info_data *)SRAM0_BASE;

	ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(imx8ulp_mu), &devp);
	if (ret) {
		printf("could not get S400 mu %d\n", ret);
		return ret;
	}

	ret = ahab_get_info(info, &res);
	if (ret) {
		printf("ahab_get_info failed %d\n", ret);
		/* fall back to the A0.1 revision */
		memset((void *)info, 0, sizeof(struct sentinel_get_info_data));
		info->soc = 0xa000084d;
	}

	set_cpu_info(info);

	return 0;
}

static int imx8ulp_evt_dm_post_init(void *ctx, struct event *event)
{
	return imx8ulp_dm_post_init();
}
EVENT_SPY(EVT_DM_POST_INIT_F, imx8ulp_evt_dm_post_init);

#if defined(CONFIG_SPL_BUILD)
__weak void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	debug("image entry point: 0x%lx\n", spl_image->entry_point);

	set_core0_reset_vector((u32)spl_image->entry_point);

	/* Enable the 512KB cache */
	setbits_le32(SIM1_BASE_ADDR + 0x30, (0x1 << 4));

	/* reset core */
	setbits_le32(SIM1_BASE_ADDR + 0x30, (0x1 << 16));

	while (1)
		;
}
#endif

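/*
 * Read the Ethernet MAC address from fuse bank 5, words 3 and 4
 * (bytes 0-3 in the first word, bytes 4-5 in the second); zero the MAC
 * and report the error if either fuse read fails.
 */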
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 val[2] = {};
	int ret;

	ret = fuse_read(5, 3, &val[0]);
	if (ret)
		goto err;

	ret = fuse_read(5, 4, &val[1]);
	if (ret)
		goto err;

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	memset(mac, 0, 6);
	printf("%s: fuse read err: %d\n", __func__, ret);
}

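/*
 * card_emmc_is_boot_part_en() lives in the boot ROM at the fixed address
 * 0x67cc; it reports whether the eMMC boot partition is enabled, which
 * the workaround below uses to decide when to drop the ROM-provided
 * image offset.
 */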
int (*card_emmc_is_boot_part_en)(void) = (void *)0x67cc;
u32 spl_arch_boot_image_offset(u32 image_offset, u32 rom_bt_dev)
{
	/* Hard-coded eMMC image_offset on the 8ULP ROM, needs a ROM fix; temporary workaround */
	if (is_soc_rev(CHIP_REV_1_0) && ((rom_bt_dev >> 16) & 0xff) == BT_DEV_TYPE_MMC &&
		card_emmc_is_boot_part_en())
		image_offset = 0;

	return image_offset;
}

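/*
 * Select where the environment lives based on the boot device: SPI flash
 * for FlexSPI/QSPI boot, MMC for SD/eMMC boot, otherwise ENVL_NOWHERE if
 * that is configured. Only the highest-priority location (prio 0) is
 * reported.
 */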
enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_device dev = get_boot_device();
	enum env_location env_loc = ENVL_UNKNOWN;

	if (prio)
		return env_loc;

	switch (dev) {
#ifdef CONFIG_ENV_IS_IN_SPI_FLASH
	case QSPI_BOOT:
		env_loc = ENVL_SPI_FLASH;
		break;
#endif
#ifdef CONFIG_ENV_IS_IN_MMC
	case SD1_BOOT:
	case SD2_BOOT:
	case SD3_BOOT:
	case MMC1_BOOT:
	case MMC2_BOOT:
	case MMC3_BOOT:
		env_loc = ENVL_MMC;
		break;
#endif
	default:
#if defined(CONFIG_ENV_IS_NOWHERE)
		env_loc = ENVL_NOWHERE;
#endif
		break;
	}

	return env_loc;
}