// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: R5 Common Architecture initialization
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <env.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <image.h>
#include <fs_loader.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <spl.h>
#include <remoteproc.h>
#include <elf.h>

#include "../common.h"

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_ID_TIFSSTUB_HS,
	IMAGE_ID_TIFSSTUB_FS,
	IMAGE_ID_TIFSSTUB_GP,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
	"tifsstub-hs",
	"tifsstub-fs",
	"tifsstub-gp",
};
#endif

static struct image_info fit_image_info[IMAGE_AMT];

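/**
 * init_env() - Set up the SPL environment for the current boot device
 *
 * Initializes and relocates the environment, then records the storage
 * interface plus the partition (MMC) or MTD partition/volume (UBI on
 * SPI) that firmware should later be loaded from. Boot devices other
 * than MMC2 and SPI are reported as unsupported. No-op unless
 * CONFIG_SPL_ENV_SUPPORT is enabled.
 */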
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

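/**
 * load_firmware() - Load a firmware binary into memory via the FS loader
 * @name_fw:       name of the environment variable holding the firmware
 *                 file name
 * @name_loadaddr: name of the environment variable holding the load
 *                 address
 * @loadaddr:      filled with the address the firmware was loaded to
 *
 * Return: size of the loaded firmware on success, 0 if FS_LOADER is
 * disabled, the boot device is unsupported or no load address is set,
 * or a negative error code from the FS loader.
 */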
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	if (!CONFIG_IS_ENABLED(FS_LOADER))
		return 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!get_fs_loader(&fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}

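/**
 * release_resources_for_core_shutdown() - Release resources before shutdown
 *
 * Powers down the devices in put_device_ids and queues TI-SCI shutdown
 * requests for the cores in put_core_ids. The queued core shutdown must
 * be completed by executing a WFE or WFI instruction afterwards. Panics
 * if any TI-SCI call fails.
 */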
void release_resources_for_core_shutdown(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
	struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
	int ret;
	u32 i;

	/* Iterate through list of devices to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
		u32 id = put_device_ids[i];

		ret = dev_ops->put_device(ti_sci, id);
		if (ret)
			panic("Failed to put device %u (%d)\n", id, ret);
	}

	/* Iterate through list of cores to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
		u32 id = put_core_ids[i];

		/*
		 * Queue up the core shutdown request. Note that this call
		 * needs to be followed up by an actual invocation of a WFE
		 * or WFI CPU instruction.
		 */
		ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
		if (ret)
			panic("Failed sending core %u shutdown message (%d)\n",
			      id, ret);
	}
}

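/**
 * jump_to_image() - Transfer control from the R5 SPL to ATF and DM firmware
 * @spl_image: handle to the image loaded by SPL
 *
 * Loads ATF onto the Cortex-A core (remoteproc device 1), authenticates
 * ATF and OP-TEE when FIT post-processing is enabled, starts ATF, and
 * then either jumps to the DM firmware ELF image on this R5 core or, if
 * no DM firmware is available, shuts the R5 core down. Does not return.
 */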
void __noreturn jump_to_image(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices();

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * Remoteproc device 1 is assumed to be the Cortex-A core that runs
	 * ATF. Make sure the DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
	/* Authenticate ATF */
	void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_ATF].image_start,
	      fit_image_info[IMAGE_ID_ATF].image_len,
	      image_os_match[IMAGE_ID_ATF]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);

	/* Authenticate OP-TEE */
	image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_OPTEE].image_start,
	      fit_image_info[IMAGE_ID_OPTEE].image_len,
	      image_os_match[IMAGE_ID_OPTEE]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);
#endif

	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
#endif

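/**
 * disable_linefill_optimization() - Work around a Cortex-R5F deadlock
 *
 * Sets the DLFO bit in the auxiliary control register to disable the
 * R5F linefill optimization, avoiding the two deadlock conditions
 * described in the comment below.
 */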
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are two conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write-back/write-allocate memory region and then
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write-back/write-allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid both conditions, disable linefill optimization in the
	 * Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}

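/**
 * remove_fwl_region() - Disable a single firewall region over TI-SCI
 * @fwl: firewall data naming the firewall ID and region to disable
 *
 * Reads the current region configuration, clears its enable field and
 * writes the configuration back.
 *
 * Return: 0 on success, the error from set_fwl_region otherwise.
 */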
int remove_fwl_region(struct fwl_data *fwl)
{
	struct ti_sci_handle *sci = get_ti_sci_handle();
	struct ti_sci_fwl_ops *ops = &sci->ops.fwl_ops;
	struct ti_sci_msg_fwl_region region;
	int ret;

	region.fwl_id = fwl->fwl_id;
	region.region = fwl->regions;
	region.n_permission_regs = 3;

	ops->get_fwl_region(sci, &region);

	/* Zero out the enable field of the firewall */
	region.control = region.control & ~0xF;

	pr_debug("Disabling firewall id: %d region: %d\n",
		 region.fwl_id, region.region);

	ret = ops->set_fwl_region(sci, &region);
	if (ret)
		pr_err("Could not disable firewall\n");
	return ret;
}

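/**
 * remove_fwl_regions() - Disable all firewall regions of a given type
 * @fwl_data:    firewall data naming the firewall and its region count
 * @num_regions: region count (currently unused; fwl_data.regions is
 *               iterated instead)
 * @fwl_type:    type of region (foreground or background) to disable
 */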
static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
			       enum k3_firewall_region_type fwl_type)
{
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	struct ti_sci_msg_fwl_region region;
	size_t j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;

	for (j = 0; j < fwl_data.regions; j++) {
		region.fwl_id = fwl_data.fwl_id;
		region.region = j;
		region.n_permission_regs = 3;

		fwl_ops->get_fwl_region(ti_sci, &region);

		/* Disable only enabled regions of the requested type */
		if (region.control != 0 &&
		    ((region.control >> K3_FIREWALL_BACKGROUND_BIT) & 1) == fwl_type) {
			pr_debug("Attempting to disable firewall %5d (%25s)\n",
				 region.fwl_id, fwl_data.name);
			region.control = 0;

			if (fwl_ops->set_fwl_region(ti_sci, &region))
				pr_err("Could not disable firewall %5d (%25s)\n",
				       region.fwl_id, fwl_data.name);
		}
	}
}

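/**
 * remove_fwl_configs() - Disable the firewalls described by an array
 * @fwl_data:      array of firewall data to process
 * @fwl_data_size: number of entries in @fwl_data
 *
 * For each firewall, disables all foreground regions first and the
 * background regions afterwards.
 */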
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	size_t i;

	for (i = 0; i < fwl_data_size; i++) {
		remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
				   K3_FIREWALL_REGION_FOREGROUND);
		remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
				   K3_FIREWALL_REGION_BACKGROUND);
	}
}

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
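/**
 * board_fit_image_post_process() - Record and authenticate a FIT image
 * @fit:     pointer to the FIT blob being processed
 * @node:    offset of the image node within the FIT
 * @p_image: pointer to the image address, may be updated in place
 * @p_size:  pointer to the image size, may be updated in place
 *
 * Records the entry address and size of recognized images for later use
 * by jump_to_image(), drops tifsstub images that do not match the device
 * type, and authenticates all other images. ATF and OP-TEE are only
 * verified here; their post-processing happens once the A72 cluster is up.
 */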
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
	int len;
	int i;
	const char *os;
	u32 addr;

	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}

	if (i < IMAGE_AMT && i > IMAGE_ID_DM_FW) {
		int device_type = get_device_type();

		if ((device_type == K3_DEVICE_TYPE_HS_SE &&
		     strcmp(os, "tifsstub-hs")) ||
		    (device_type == K3_DEVICE_TYPE_HS_FS &&
		     strcmp(os, "tifsstub-fs")) ||
		    (device_type == K3_DEVICE_TYPE_GP &&
		     strcmp(os, "tifsstub-gp"))) {
			*p_size = 0;
		} else {
			debug("tifsstub-type: %s\n", os);
		}

		return;
	}

	/*
	 * Only DM and the DTBs are authenticated here; the rest will be
	 * authenticated when the A72 cluster is up
	 */
	if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE)) {
		ti_secure_image_check_binary(p_image, p_size);
		ti_secure_image_post_process(p_image, p_size);
	} else {
		ti_secure_image_check_binary(p_image, p_size);
	}
}
#endif