/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-21     GuEe-GUI     first version
 */

#include <rtthread.h>

#define DBG_TAG "cpu.aa64"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <smp_call.h>
#include <cpu.h>
#include <mmu.h>
#include <cpuport.h>
#include <interrupt.h>
#include <gtimer.h>
#include <setup.h>
#include <stdlib.h>
#include <ioremap.h>
#include <rtdevice.h>
#include <gic.h>
#include <gicv3.h>
#include <mm_memblock.h>

#define SIZE_KB 1024
#define SIZE_MB (1024 * SIZE_KB)
#define SIZE_GB (1024 * SIZE_MB)

extern rt_ubase_t _start, _end;
extern void _secondary_cpu_entry(void);
extern size_t MMUTable[];
extern void *system_vectors;

static void *fdt_ptr = RT_NULL;
static rt_size_t fdt_size = 0;
static rt_uint64_t initrd_ranges[3] = { };

#ifdef RT_USING_SMP
extern struct cpu_ops_t cpu_psci_ops;
extern struct cpu_ops_t cpu_spin_table_ops;
#else
extern int rt_hw_cpu_id(void);
#endif
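/*
 * MPIDR affinity values for each CPU, indexed by logical CPU id.
 * The extra zeroed entry at [RT_CPUS_NR] terminates the table,
 * which is presumably scanned by the early boot/SMP startup code.
 */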
rt_uint64_t rt_cpu_mpidr_table[] =
{
    [RT_CPUS_NR] = 0,
};

static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
    &cpu_psci_ops,
    &cpu_spin_table_ops,
#endif
};

static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };

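/*
 * Record the FDT (device tree blob) passed in by the bootloader.
 * Called very early with the boot-time pointer; only the header is
 * validated here, the blob itself is relocated later in
 * rt_hw_common_setup().
 */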
void rt_hw_fdt_install_early(void *fdt)
{
    if (fdt != RT_NULL && !fdt_check_header(fdt))
    {
        fdt_ptr = fdt;
        fdt_size = fdt_totalsize(fdt);
    }
}

#ifdef RT_USING_HWTIMER
static rt_ubase_t loops_per_tick[RT_CPUS_NR];

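/* Read CNTPCT_EL0, the ARM generic timer's physical counter. */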
static rt_ubase_t cpu_get_cycles(void)
{
    rt_ubase_t cycles;

    rt_hw_sysreg_read(cntpct_el0, cycles);

    return cycles;
}

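/*
 * Calibrate a per-CPU "loops per tick" value: count busy-loop
 * increments over two one-tick windows of the generic timer (the
 * first loop carries an extra nop per iteration) and keep the
 * difference between the two counts.
 */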
static void cpu_loops_per_tick_init(void)
{
    rt_ubase_t offset;
    volatile rt_ubase_t freq, step, cycles_end1, cycles_end2;
    volatile rt_uint32_t cycles_count1 = 0, cycles_count2 = 0;

    rt_hw_sysreg_read(cntfrq_el0, freq);
    step = freq / RT_TICK_PER_SECOND;

    cycles_end1 = cpu_get_cycles() + step;

    while (cpu_get_cycles() < cycles_end1)
    {
        __asm__ volatile ("nop");
        /* "+r": the operand is both read and written, so it needs an in/out constraint */
        __asm__ volatile ("add %0, %0, #1":"+r"(cycles_count1));
    }

    cycles_end2 = cpu_get_cycles() + step;

    while (cpu_get_cycles() < cycles_end2)
    {
        __asm__ volatile ("add %0, %0, #1":"+r"(cycles_count2));
    }

    if ((rt_int32_t)(cycles_count2 - cycles_count1) > 0)
    {
        offset = cycles_count2 - cycles_count1;
    }
    else
    {
        /* Should be impossible, but fall back to a safe estimate anyway */
        offset = cycles_count2 / 4;
    }

    loops_per_tick[rt_hw_cpu_id()] = offset;
}

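/*
 * Busy-wait for roughly `us` microseconds. 0x10c7 is about
 * 2^32 / 10^6, so (us * 0x10c7 * loops_per_second) >> 32
 * approximates us millionths of the calibrated per-second budget;
 * e.g. cpu_us_delay(10) spins for roughly 10us.
 */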
static void cpu_us_delay(rt_uint32_t us)
{
    volatile rt_base_t start = cpu_get_cycles(), cycles;

    cycles = ((us * 0x10c7UL) * loops_per_tick[rt_hw_cpu_id()] * RT_TICK_PER_SECOND) >> 32;

    while ((cpu_get_cycles() - start) < cycles)
    {
        rt_hw_cpu_relax();
    }
}
#endif /* RT_USING_HWTIMER */

rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}

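/* Point VBAR_EL1 at the kernel exception vector table. */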
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}

rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;

    /* get boot cpu info */
    rt_hw_sysreg_read(mpidr_el1, mpidr);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);

        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /* Keep only the affinity fields and set the reserved bit so SMP boot can check it */
            hwid |= 1ULL << 31;
        }
        else
        {
            hwid = mpidr;
        }

        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;

        rt_ofw_data(np) = (void *)hwid;

        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

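    /*
     * Flush the table to memory: secondary cores start executing with
     * their data caches disabled and read rt_cpu_mpidr_table before
     * the MMU is up, so it must be visible beyond this core's cache.
     */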
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));

#ifdef RT_USING_HWTIMER
    cpu_loops_per_tick_init();

    if (!rt_device_hwtimer_us_delay)
    {
        rt_device_hwtimer_us_delay = &cpu_us_delay;
    }
#endif /* RT_USING_HWTIMER */
}

void rt_hw_common_setup(void)
{
    rt_size_t kernel_start, kernel_end;
    rt_size_t heap_start, heap_end;
    rt_size_t init_page_start, init_page_end;
    rt_size_t fdt_start, fdt_end;
    rt_region_t init_page_region = { 0 };
    rt_region_t platform_mem_region = { 0 };
    static struct mem_desc platform_mem_desc;
    const rt_ubase_t pv_off = PV_OFFSET;

    system_vectors_init();

#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void *)0xfffffffff0000000, 0x10000000, MMUTable, pv_off);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void *)0xffffd0000000, 0x10000000, MMUTable, 0);
#endif

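    /*
     * Carve out a contiguous, page-aligned physical layout right after
     * the kernel image: kernel | system heap | early page pool | FDT
     * copy. Each region is then reserved in the memblock allocator.
     */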
    kernel_start = RT_ALIGN_DOWN((rt_size_t)rt_kmem_v2p((void *)&_start) - 64, ARCH_PAGE_SIZE);
    kernel_end = RT_ALIGN((rt_size_t)rt_kmem_v2p((void *)&_end), ARCH_PAGE_SIZE);
    heap_start = kernel_end;
    heap_end = RT_ALIGN(heap_start + ARCH_HEAP_SIZE, ARCH_PAGE_SIZE);
    init_page_start = heap_end;
    init_page_end = RT_ALIGN(init_page_start + ARCH_INIT_PAGE_SIZE, ARCH_PAGE_SIZE);
    fdt_start = init_page_end;
    fdt_end = RT_ALIGN(fdt_start + fdt_size, ARCH_PAGE_SIZE);

    platform_mem_region.start = kernel_start;
    platform_mem_region.end = fdt_end;

    rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("memheap", heap_start, heap_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);

    /* To virtual address */
    fdt_ptr = (void *)(fdt_ptr - pv_off);
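    /*
     * If the FDT sits more than 1 GiB above the kernel's virtual base,
     * it may fall outside the early linear mapping, so remap its
     * physical location through the early ioremap window instead.
     */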
#ifdef KERNEL_VADDR_START
    if ((rt_ubase_t)fdt_ptr + fdt_size - KERNEL_VADDR_START > SIZE_GB)
    {
        fdt_ptr = rt_ioremap_early(fdt_ptr + pv_off, fdt_size);

        RT_ASSERT(fdt_ptr != RT_NULL);
    }
#endif
    rt_memmove((void *)(fdt_start - pv_off), fdt_ptr, fdt_size);
    fdt_ptr = (void *)fdt_start - pv_off;

    rt_system_heap_init((void *)(heap_start - pv_off), (void *)(heap_end - pv_off));

    init_page_region.start = init_page_start - pv_off;
    init_page_region.end = init_page_end - pv_off;
    rt_page_init(init_page_region);

    /* create MMU mapping of kernel memory */
    platform_mem_region.start = RT_ALIGN_DOWN(platform_mem_region.start, ARCH_PAGE_SIZE);
    platform_mem_region.end = RT_ALIGN(platform_mem_region.end, ARCH_PAGE_SIZE);

    platform_mem_desc.paddr_start = platform_mem_region.start;
    platform_mem_desc.vaddr_start = platform_mem_region.start - pv_off;
    platform_mem_desc.vaddr_end = platform_mem_region.end - pv_off - 1;
    platform_mem_desc.attr = NORMAL_MEM;

    rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }

    rt_fdt_scan_chosen_stdout();

    rt_fdt_scan_initrd(initrd_ranges);

    rt_fdt_scan_memory();

    rt_memblock_setup_memory_environment();

    rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

    rt_fdt_unflatten();

    cpu_info_init();

#ifdef RT_USING_PIC
    rt_pic_init();
    rt_pic_irq_init();
#else
    /* initialize hardware interrupt */
    rt_hw_interrupt_init();

    /* initialize uart */
    rt_hw_uart_init();
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_init();
#endif /* !RT_HWTIMER_ARM_ARCH */

#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif

#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif

    rt_thread_idle_sethook(rt_hw_idle_wfi);

#ifdef RT_USING_SMP
    rt_smp_call_init();
    /* Install the IPI handlers */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_SMP_CALL_IPI, rt_smp_call_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);
#endif
}

#ifdef RT_USING_SMP
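/*
 * Boot each secondary CPU: translate the shared entry point to its
 * physical address, then dispatch to the cpu_ops implementation
 * (PSCI or spin-table) matching the CPU node's "enable-method".
 */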
rt_weak void rt_hw_secondary_cpu_up(void)
{
    int cpu_id = rt_hw_cpu_id();
    rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);

    if (!entry)
    {
        LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
        RT_ASSERT(0);
    }

    /* We may not be running on the first CPU, so skip ourselves by id */
    for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
    {
        int err;
        const char *enable_method;

        if (!cpu_np[i] || i == cpu_id)
        {
            continue;
        }

        err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);

        for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
            {
                err = ops->cpu_boot(i, entry);

                break;
            }
        }

        if (err)
        {
            LOG_W("Call cpu %d on %s", i, "failed");
        }
    }
}

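/*
 * Entry point for each secondary core after _secondary_cpu_entry:
 * install exception vectors, switch to the shared kernel page table,
 * bring up the per-CPU interrupt controller and timer, then hand the
 * core to the scheduler.
 */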
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    system_vectors_init();

    rt_hw_spin_lock(&_cpus_lock);

    /* Save this core's mpidr into the table */
    rt_hw_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

#ifdef RT_USING_PIC
    rt_pic_irq_init();
#else
    /* initialize vector table */
    rt_hw_vector_init();

    arm_gic_cpu_init(0, 0);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, 0);
#endif /* BSP_USING_GICV3 */
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_local_enable();
#endif /* !RT_HWTIMER_ARM_ARCH */

    rt_dm_secondary_cpu_init();

    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#ifdef RT_USING_HWTIMER
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}

rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
#endif

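/* Low-level console output for rt_kprintf, routed through the FDT earlycon. */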
void rt_hw_console_output(const char *str)
{
    rt_fdt_earlycon_output(str);
}