1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2010-01-25 Bernard first version
9 * 2012-05-31 aozima Merge all of the C source code into cpuport.c
10 * 2012-08-17 aozima fixed bug: store r8 - r11.
11 * 2012-12-23 aozima stack addr align to 8byte.
12 * 2023-01-22 rose_man add RT_USING_SMP
13 */
14
15 #include <rtthread.h>
16 #include <rthw.h>
17 #include <rtthread.h>
18 #include <stdint.h>
19
20 #include "board.h"
21
22 #ifdef RT_USING_SMP
23
24 #include "hardware/structs/sio.h"
25 #include "hardware/irq.h"
26 #include "pico/sync.h"
27 #include "pico/multicore.h"
28
rt_hw_cpu_id(void)29 int rt_hw_cpu_id(void)
30 {
31 return sio_hw->cpuid;
32 }
33
/* Bind an RT-Thread spinlock to one of the RP2040's 32 hardware spin
 * locks.  Each call hands out the next unused hardware lock; once all
 * 32 are taken, the spinlock is left lock-free (slock == 0) and the
 * lock/unlock operations degrade to no-ops. */
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
{
    /* Index of the next hardware spin lock to hand out. */
    static uint8_t next_lock_id = 0;

    if (next_lock_id >= 32)
    {
        /* Pool exhausted: mark this lock as having no hardware backing. */
        lock->slock = 0;
        return;
    }

    lock->slock = (rt_uint32_t)spin_lock_instance(next_lock_id);
    next_lock_id++;
}
48
/* Acquire the hardware spin lock backing 'lock', busy-waiting until it
 * is free.  Does not touch interrupts ("unsafe" SDK variant).  A lock
 * with no hardware backing (slock == 0) is a no-op. */
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
    spin_lock_t *hw_lock = (spin_lock_t *)lock->slock;

    if (hw_lock == RT_NULL)
    {
        return;
    }

    spin_lock_unsafe_blocking(hw_lock);
}
56
/* Release the hardware spin lock backing 'lock'.  Does not touch
 * interrupts ("unsafe" SDK variant).  A lock with no hardware backing
 * (slock == 0) is a no-op. */
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
    spin_lock_t *hw_lock = (spin_lock_t *)lock->slock;

    if (hw_lock == RT_NULL)
    {
        return;
    }

    spin_unlock_unsafe(hw_lock);
}
64
/* C entry point for core 1, launched by rt_hw_secondary_cpu_up().
 * Enables this core's inter-core FIFO IRQ, starts its tick timer, then
 * hands control to the scheduler.  Never returns. */
void secondary_cpu_c_start(void)
{
    /* Accept inter-core FIFO (IPI) interrupts on core 1. */
    irq_set_enabled(SIO_IRQ_PROC1,RT_TRUE);

    extern uint32_t systick_config(uint32_t ticks);
    /* Program SysTick from a frequency-counter measurement.
     * NOTE(review): the measured source is ROSC and the scale factor is
     * kHz*10000 rather than kHz*1000 (Hz) -- confirm this matches the
     * clock actually driving this core's SysTick. */
    systick_config(frequency_count_khz(CLOCKS_FC0_SRC_VALUE_ROSC_CLKSRC)*10000/RT_TICK_PER_SECOND);

    /* Take the global cpus lock before starting the scheduler; it is
     * released during the first context switch. */
    rt_hw_spin_lock(&_cpus_lock);

    rt_system_scheduler_start();
}
76
/* Called by the kernel on core 0 to bring the secondary core online. */
void rt_hw_secondary_cpu_up(void)
{
    /* Start core 1 at secondary_cpu_c_start (stack and vector table are
     * supplied by the Pico SDK launch protocol). */
    multicore_launch_core1(secondary_cpu_c_start);

    /* Accept inter-core FIFO (IPI) interrupts on core 0. */
    irq_set_enabled(SIO_IRQ_PROC0,RT_TRUE);
}
83
/* Idle hook for the secondary core: sleep until the next interrupt. */
void rt_hw_secondary_cpu_idle_exec(void)
{
    asm volatile ("wfi");
}
88
89 #define IPI_MAGIC 0x5a5a
90
rt_hw_ipi_send(int ipi_vector,unsigned int cpu_mask)91 void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
92 {
93 sio_hw->fifo_wr = IPI_MAGIC;
94 }
95
/* Common handler for the inter-core FIFO interrupt on either core:
 * clears FIFO error flags and consumes one pending doorbell word. */
void rt_hw_ipi_handler(void)
{
    uint32_t status = sio_hw->fifo_st;

    /* Writing to FIFO_ST clears the sticky receive-on-empty (ROE) and
     * write-on-full (WOF) error flags. */
    if ( status & (SIO_FIFO_ST_ROE_BITS | SIO_FIFO_ST_WOF_BITS) )
    {
        sio_hw->fifo_st = 0;
    }

    /* VLD set: at least one word is waiting in the RX FIFO.  Only one
     * word is popped per invocation; NOTE(review): presumably the IRQ
     * stays asserted while VLD remains set, so further words re-enter
     * here -- confirm against the RP2040 datasheet. */
    if ( status & SIO_FIFO_ST_VLD_BITS )
    {
        if ( sio_hw->fifo_rd == IPI_MAGIC )
        {
            /* Rescheduling on IPI is currently disabled. */
            //rt_schedule();
        }
    }
}
113
/* Vector for external IRQ 15 (SIO_IRQ_PROC0, core 0's FIFO interrupt):
 * forward to the common IPI handler. */
void isr_irq15(void)
{
    rt_hw_ipi_handler();
}
118
/* Vector for external IRQ 16 (SIO_IRQ_PROC1, core 1's FIFO interrupt):
 * forward to the common IPI handler. */
void isr_irq16(void)
{
    rt_hw_ipi_handler();
}
/* Per-core scratch record handing the outgoing/incoming thread stack
 * slot addresses to the context-switch code.  NOTE(review): presumably
 * consumed by the PendSV assembly in the companion .S port file --
 * confirm there before changing the layout. */
struct __rt_thread_switch_array
{
    rt_ubase_t flag;  /* 1 while a switch request is pending on this core */
    rt_ubase_t from;  /* outgoing thread's SP slot (captured once per pending switch) */
    rt_ubase_t to;    /* incoming thread's SP slot (may be retargeted) */
};
/* One entry per core; RP2040 has two. */
struct __rt_thread_switch_array rt_thread_switch_array[2] = { {0,0,0}, {0,0,0} };
130
/* Record a pending context switch for the current core and update the
 * per-cpu bookkeeping; the actual register swap happens later
 * (NOTE(review): presumably in the PendSV handler that reads
 * rt_thread_switch_array -- confirm against the assembly port file).
 *
 * @param from   address of the outgoing thread's stack-pointer slot
 * @param to     address of the incoming thread's stack-pointer slot
 * @param thread the thread being switched in
 */
void __rt_cpu_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *thread)
{
    struct rt_cpu* pcpu = rt_cpu_self();
    rt_uint32_t cpuid = rt_hw_cpu_id();

    /* Capture 'from' only on the first request while a switch is still
     * pending; later requests before the switch runs may only retarget
     * the destination. */
    if ( rt_thread_switch_array[cpuid].flag != 1)
    {
        rt_thread_switch_array[cpuid].flag = 1;
        rt_thread_switch_array[cpuid].from = from;
    }
    rt_thread_switch_array[cpuid].to = to;

    /* Carry the lock-nesting counters over from the thread being
     * switched out so this core's locking state stays consistent. */
    if ( pcpu->current_thread != RT_NULL )
    {
        thread->cpus_lock_nest = pcpu->current_thread->cpus_lock_nest;
        thread->critical_lock_nest = pcpu->current_thread->critical_lock_nest;
        thread->scheduler_lock_nest = pcpu->current_thread->scheduler_lock_nest;
    }

    pcpu->current_thread = thread;
    /* If the incoming thread holds no nested cpus lock, release the
     * global lock taken before requesting the switch. */
    if (!thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_cpus_lock);
    }
}
156
157 #endif /*RT_USING_SMP*/
158
/* Register frame pushed automatically by the Cortex-M hardware on
 * exception entry, lowest address (r0) first. */
struct exception_stack_frame
{
    rt_uint32_t r0;
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;   /* return address of the interrupted code */
    rt_uint32_t pc;   /* resume address */
    rt_uint32_t psr;  /* xPSR; bit 24 is the Thumb state bit */
};
170
/* Full context saved on a thread's stack: the registers stored by
 * software (r4-r11) followed by the hardware-pushed exception frame. */
struct stack_frame
{
    /* r4 ~ r7 low register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;

    /* r8 ~ r11 high register */
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;

    /* registers the hardware pushes on exception entry */
    struct exception_stack_frame exception_stack_frame;
};
187
/* Handshake variables between the context-switch request code and the
 * interrupt-level switch handling: SP slot addresses of the threads
 * being switched out/in, plus a flag marking a pending switch.
 * NOTE(review): presumably read by the assembly part of the port --
 * confirm before renaming. */
rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
rt_uint32_t rt_thread_switch_interrupt_flag;
191
192 /**
193 * This function will initialize thread stack
194 *
195 * @param tentry the entry of thread
196 * @param parameter the parameter of entry
197 * @param stack_addr the beginning stack address
198 * @param texit the function will be called when thread exit
199 *
200 * @return stack address
201 */
/**
 * This function will initialize thread stack
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address
 * @param texit the function will be called when thread exit
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void       *tentry,
                             void       *parameter,
                             rt_uint8_t *stack_addr,
                             void       *texit)
{
    struct stack_frame *frame;
    rt_uint32_t        *slot;
    rt_uint8_t         *sp;

    /* Step one word past the given top address, then round down so the
     * resulting stack pointer is 8-byte aligned (AAPCS requirement). */
    sp = stack_addr + sizeof(rt_uint32_t);
    sp = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)sp, 8);

    /* Reserve room for the initial register frame. */
    sp -= sizeof(struct stack_frame);
    frame = (struct stack_frame *)sp;

    /* Pre-fill every register slot with a recognizable poison value so
     * unused registers are easy to spot when debugging. */
    for (slot = (rt_uint32_t *)frame;
         slot < (rt_uint32_t *)(frame + 1);
         slot++)
    {
        *slot = 0xdeadbeef;
    }

    frame->exception_stack_frame.r0  = (unsigned long)parameter; /* r0 : first argument */
    frame->exception_stack_frame.r1  = 0;                        /* r1 */
    frame->exception_stack_frame.r2  = 0;                        /* r2 */
    frame->exception_stack_frame.r3  = 0;                        /* r3 */
    frame->exception_stack_frame.r12 = 0;                        /* r12 */
    frame->exception_stack_frame.lr  = (unsigned long)texit;     /* lr : thread exit hook */
    frame->exception_stack_frame.pc  = (unsigned long)tentry;    /* pc : entry point */
    frame->exception_stack_frame.psr = 0x01000000L;              /* xPSR: Thumb bit set */

    /* return task's current stack address */
    return sp;
}
235
236 #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
237 extern long list_thread(void);
238 #endif
239 extern rt_thread_t rt_current_thread;
/**
 * fault exception handling
 *
 * Dumps the hardware-pushed exception frame and the faulting thread's
 * name to the console, then parks in an infinite loop -- no recovery
 * is attempted.
 *
 * @param contex pointer to the exception frame saved on the active stack
 */
void rt_hw_hard_fault_exception(struct exception_stack_frame *contex)
{
    rt_kprintf("psr: 0x%08x\n", contex->psr);
    rt_kprintf(" pc: 0x%08x\n", contex->pc);
    rt_kprintf(" lr: 0x%08x\n", contex->lr);
    rt_kprintf("r12: 0x%08x\n", contex->r12);
    rt_kprintf("r03: 0x%08x\n", contex->r3);
    rt_kprintf("r02: 0x%08x\n", contex->r2);
    rt_kprintf("r01: 0x%08x\n", contex->r1);
    rt_kprintf("r00: 0x%08x\n", contex->r0);

#ifdef RT_USING_SMP
    /* Deliberately shadows the file-level rt_current_thread: in SMP mode
     * the faulting thread must be looked up per-cpu. */
    rt_thread_t rt_current_thread = rt_thread_self();
    rt_kprintf("hard fault on cpu : %d on thread: %s\n", rt_current_thread->oncpu, rt_current_thread->parent.name);
#else
    rt_kprintf("hard fault on thread: %s\n", rt_current_thread->parent.name);
#endif
#if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
    list_thread();
#endif

    /* halt here so the dump above stays on screen / attached debugger */
    while (1);
}
266
267 #define SCB_CFSR (*(volatile const unsigned *)0xE000ED28) /* Configurable Fault Status Register */
268 #define SCB_HFSR (*(volatile const unsigned *)0xE000ED2C) /* HardFault Status Register */
269 #define SCB_MMAR (*(volatile const unsigned *)0xE000ED34) /* MemManage Fault Address register */
270 #define SCB_BFAR (*(volatile const unsigned *)0xE000ED38) /* Bus Fault Address Register */
271 #define SCB_AIRCR (*(volatile unsigned long *)0xE000ED0C) /* Reset control Address Register */
272 #define SCB_RESET_VALUE 0x05FA0004 /* Reset value, write to SCB_AIRCR can reset cpu */
273
274 #define SCB_CFSR_MFSR (*(volatile const unsigned char*)0xE000ED28) /* Memory-management Fault Status Register */
275 #define SCB_CFSR_BFSR (*(volatile const unsigned char*)0xE000ED29) /* Bus Fault Status Register */
276 #define SCB_CFSR_UFSR (*(volatile const unsigned short*)0xE000ED2A) /* Usage Fault Status Register */
277
/**
 * reset CPU
 *
 * Requests a system reset by writing the VECTKEY (0x05FA) together with
 * the SYSRESETREQ bit to the SCB AIRCR register.  Does not return.
 */
void rt_hw_cpu_reset(void)
{
    SCB_AIRCR = SCB_RESET_VALUE;//((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |SCB_AIRCR_SYSRESETREQ_Msk);
}
285