// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018, 2021 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <cpu_func.h>
#include <dm.h>
#include <event.h>
#include <init.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <spl.h>
#include <thermal.h>
#include <firmware/imx/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/setup.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

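/*
 * Early CPU setup: in SPL, save/restore the SPL data section when that
 * option is enabled, and on rev A silicon report a successful boot to the
 * SCFW when the ROM pass-over info indicates U-Boot came from the first
 * container.
 */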
int arch_cpu_init(void)
{
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_RECOVER_DATA_SECTION)
	spl_save_restore_data();
#endif

#ifdef CONFIG_SPL_BUILD
	struct pass_over_info_t *pass_over;

	if (is_soc_rev(CHIP_REV_A)) {
		pass_over = get_pass_over_info();
		if (pass_over && pass_over->g_ap_mu == 0) {
			/*
			 * When ap_mu is 0, U-Boot was booted from the
			 * first container
			 */
			sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
		}
	}
#endif

	return 0;
}

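/*
 * Bind the SCU MU (message unit) MISC device right after early driver model
 * init, so the channel used for SCFW calls is available; on i.MX8QM also
 * power up the SMMU resource.
 */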
static int imx8_init_mu(void *ctx, struct event *event)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");

	ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
	if (ret) {
		printf("could not get scu %d\n", ret);
		return ret;
	}

	if (is_imx8qm()) {
		ret = sc_pm_set_resource_power_mode(-1, SC_R_SMMU,
						    SC_PM_PW_MODE_ON);
		if (ret)
			return ret;
	}

	return 0;
}
EVENT_SPY(EVT_DM_POST_INIT_F, imx8_init_mu);

#if defined(CONFIG_ARCH_MISC_INIT)
int arch_misc_init(void)
{
	if (IS_ENABLED(CONFIG_FSL_CAAM)) {
		struct udevice *dev;
		int ret;

		ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
		if (ret)
			printf("Failed to initialize caam_jr: %d\n", ret);
	}

	return 0;
}
#endif

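/* Report which device the SoC booted from, as determined by the SCFW */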
int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot:  ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

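/*
 * Ask the SCFW which boot resource was used and map it to U-Boot's
 * boot_device enumeration; unknown resources default to SD1_BOOT.
 */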
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;

	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_ENV_VARS_UBOOT_RUNTIME_CONFIG
#define FUSE_UNIQUE_ID_WORD0 16
#define FUSE_UNIQUE_ID_WORD1 17
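/*
 * Read the two unique-ID fuse words through the SCFW and expose them as the
 * board serial number (low = FUSE_UNIQUE_ID_WORD0, high = FUSE_UNIQUE_ID_WORD1).
 */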
void get_board_serial(struct tag_serialnr *serialnr)
{
	sc_err_t err;
	u32 val1 = 0, val2 = 0;
	u32 word1, word2;

	if (!serialnr)
		return;

	word1 = FUSE_UNIQUE_ID_WORD0;
	word2 = FUSE_UNIQUE_ID_WORD1;

	err = sc_misc_otp_fuse_read(-1, word1, &val1);
	if (err != SC_ERR_NONE) {
		printf("%s fuse %d read error: %d\n", __func__, word1, err);
		return;
	}

	err = sc_misc_otp_fuse_read(-1, word2, &val2);
	if (err != SC_ERR_NONE) {
		printf("%s fuse %d read error: %d\n", __func__, word2, err);
		return;
	}
	serialnr->low = val1;
	serialnr->high = val2;
}
#endif /* CONFIG_ENV_VARS_UBOOT_RUNTIME_CONFIG */

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* Not booted from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT  SZ_2M /* Align the memory start to 2MB */

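/*
 * If the given memory region is owned by this partition, query its start/end
 * addresses from the SCFW; return -EINVAL otherwise.
 */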
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

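/*
 * Default DDR layout taken from the PHYS_SDRAM_* config values; boards with
 * a different layout can override this weak function.
 */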
__weak void board_mem_get_layout(u64 *phys_sdram_1_start,
				 u64 *phys_sdram_1_size,
				 u64 *phys_sdram_2_start,
				 u64 *phys_sdram_2_size)
{
	*phys_sdram_1_start = PHYS_SDRAM_1;
	*phys_sdram_1_size = PHYS_SDRAM_1_SIZE;
	*phys_sdram_2_start = PHYS_SDRAM_2;
	*phys_sdram_2_size = PHYS_SDRAM_2_SIZE;
}

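/*
 * Walk the owned memory regions to find the one that contains the U-Boot
 * image (CONFIG_TEXT_BASE) and return the usable size of bank 1 up to the
 * end of that region; fall back to the full bank 1 size.
 */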
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, start_aligned;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;
	int err;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start_aligned = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small after alignment, skip it */
			if (start_aligned > end)
				continue;

			/* Find the memory region that U-Boot runs from */
			if (start >= phys_sdram_1_start && start <= end1 &&
			    (start <= CONFIG_TEXT_BASE &&
			    end >= CONFIG_TEXT_BASE)) {
				if ((end + 1) <=
				    ((sc_faddr_t)phys_sdram_1_start +
				    phys_sdram_1_size))
					return (end - phys_sdram_1_start + 1);
				else
					return phys_sdram_1_size;
			}
		}
	}

	return phys_sdram_1_size;
}

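/*
 * Sum up the owned memory regions that fall inside the two DDR banks to
 * compute gd->ram_size; fall back to the full bank sizes if nothing is found.
 */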
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;
	int err;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small after alignment, skip it */
			if (start > end)
				continue;

			if (start >= phys_sdram_1_start && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= phys_sdram_2_start &&
				   start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If no owned region was found, fall back to the default sizes */
	if (!gd->ram_size) {
		gd->ram_size = phys_sdram_1_size;
		gd->ram_size += phys_sdram_2_size;
	}
	return 0;
}

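/*
 * Bubble the just-added bank entry towards index 0 so that bi_dram[] stays
 * sorted by ascending start address.
 */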
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

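/*
 * Fill gd->bd->bi_dram[] with the owned memory regions, clipped to the two
 * DDR banks and kept sorted; fall back to the board defaults when no owned
 * region is found.
 */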
int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;
	int err;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Region too small, skip it */
				continue;

			if (start >= phys_sdram_1_start && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= phys_sdram_2_start && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If no owned region was found, fall back to the board defaults */
	if (!i) {
		gd->bd->bi_dram[0].start = phys_sdram_1_start;
		gd->bd->bi_dram[0].size = phys_sdram_1_size;
		gd->bd->bi_dram[1].start = phys_sdram_2_start;
		gd->bd->bi_dram[1].size = phys_sdram_2_size;
	}

	return 0;
}

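/*
 * MMU attributes for a block: normal, outer-shareable memory when the
 * address lies in one of the DDR banks, non-executable device (nGnRnE)
 * memory otherwise.
 */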
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	if ((addr_start >= phys_sdram_1_start &&
	     addr_start <= ((sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size)) ||
	    (addr_start >= phys_sdram_2_start &&
	     addr_start <= ((sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

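/*
 * Size of a block mapping, clipped so it does not run past the end of the
 * DDR bank containing addr_start.
 */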
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;

	if (addr_start >= phys_sdram_1_start && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= phys_sdram_2_start && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES 512
#define MAX_MEM_MAP_REGIONS 16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

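/*
 * Build imx8_mem_map at runtime: a fixed device mapping for the register
 * space plus one entry per owned memory region, then turn on the caches.
 */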
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#if defined(CONFIG_IMX8QM)
#define FUSE_MAC0_WORD0 452
#define FUSE_MAC0_WORD1 453
#define FUSE_MAC1_WORD0 454
#define FUSE_MAC1_WORD1 455
#elif defined(CONFIG_IMX8QXP)
#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711
#endif

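/*
 * Read the two MAC address fuse words for the given controller through the
 * SCFW and assemble the 6-byte MAC address from them.
 */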
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

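/*
 * Query the SoC ID/revision from the SCFW: bits [4:0] carry the chip ID
 * (offset by MXC_SOC_IMX8) and bits [8:5] the revision, packed here as
 * (id << 12) | rev.
 */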
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8;  /* Dummy ID for chip */

	return (id << 12) | rev;
}

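/*
 * SPL boot order: use the ROM-reported boot device, but switch to NOR when
 * the boot device is FlexSPI that this partition does not own.
 */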
void board_boot_order(u32 *spl_boot_list)
{
	spl_boot_list[0] = spl_boot_device();

	if (spl_boot_list[0] == BOOT_DEVICE_SPI) {
		/* If we do not own FlexSPI0, use NOR boot instead */
		if (!sc_rm_is_resource_owned(-1, SC_R_FSPI_0))
			spl_boot_list[0] = BOOT_DEVICE_NOR;
	}
}

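/*
 * Check with the SCFW whether any Cortex-M4 partition has already been
 * started (both M4 cores are checked on i.MX8QM).
 */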
bool m4_parts_booted(void)
{
	sc_rm_pt_t m4_parts[2];
	int err;

	err = sc_rm_get_resource_owner(-1, SC_R_M4_0_PID0, &m4_parts[0]);
	if (err) {
		printf("%s get resource [%d] owner error: %d\n", __func__,
		       SC_R_M4_0_PID0, err);
		return false;
	}

	if (sc_pm_is_partition_started(-1, m4_parts[0]))
		return true;

	if (is_imx8qm()) {
		err = sc_rm_get_resource_owner(-1, SC_R_M4_1_PID0, &m4_parts[1]);
		if (err) {
			printf("%s get resource [%d] owner error: %d\n",
			       __func__, SC_R_M4_1_PID0, err);
			return false;
		}

		if (sc_pm_is_partition_started(-1, m4_parts[1]))
			return true;
	}

	return false;
}