/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2009-10-11     Bernard      first version
 * 2012-01-01     aozima       support FPU register load/store during context switch.
 * 2013-06-18     aozima       add restore MSP feature.
 * 2013-06-23     aozima       support the lazy stack optimization.
 * 2018-07-24     aozima       enhance the hard fault exception handler.
 * 2024-08-13     Evlers       allow the interrupt enable/disable API to be overridden to support independent interrupt management
 */

/**
 * @addtogroup cortex-m4
 */
/*@{*/

.cpu    cortex-m4
.syntax unified
.thumb
.text

.equ    SCB_VTOR,        0xE000ED08  /* Vector Table Offset Register */
.equ    NVIC_INT_CTRL,   0xE000ED04  /* interrupt control state register */
.equ    NVIC_SYSPRI2,    0xE000ED20  /* system priority register (2) */
.equ    NVIC_PENDSV_PRI, 0xFFFF0000  /* PendSV and SysTick priority value (lowest) */
.equ    NVIC_PENDSVSET,  0x10000000  /* value to trigger PendSV exception */

/*
 * rt_base_t rt_hw_interrupt_disable(void);
 */
.global rt_hw_interrupt_disable
.weak   rt_hw_interrupt_disable
.type   rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
    MRS     r0, PRIMASK
    CPSID   I
    BX      LR

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.global rt_hw_interrupt_enable
.weak   rt_hw_interrupt_enable
.type   rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
    MSR     PRIMASK, r0
    BX      LR

/*
 * void rt_hw_context_switch(rt_uint32_t from, rt_uint32_t to);
 * r0 --> from
 * r1 --> to
 */
.global rt_hw_context_switch_interrupt
.type   rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type   rt_hw_context_switch, %function

rt_hw_context_switch_interrupt:
rt_hw_context_switch:
    /* set rt_thread_switch_interrupt_flag to 1 */
    LDR     r2, =rt_thread_switch_interrupt_flag
    LDR     r3, [r2]
    CMP     r3, #1
    BEQ     _reswitch
    MOV     r3, #1
    STR     r3, [r2]

    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
    STR     r0, [r2]

_reswitch:
    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
    STR     r1, [r2]

    LDR     r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]
    BX      LR

/* r0 --> switch from thread stack
 * r1 --> switch to thread stack
 * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
 */
.global PendSV_Handler
.type   PendSV_Handler, %function
PendSV_Handler:
    /* disable interrupts to protect the context switch */
    MRS     r2, PRIMASK
    CPSID   I

    /* get rt_thread_switch_interrupt_flag */
    LDR     r0, =rt_thread_switch_interrupt_flag
    LDR     r1, [r0]
    CBZ     r1, pendsv_exit         /* pendsv already handled */

    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV     r1, #0x00
    STR     r1, [r0]

    LDR     r0, =rt_interrupt_from_thread
    LDR     r1, [r0]
    CBZ     r1, switch_to_thread    /* skip register save the first time */

    MRS     r1, psp                 /* get from thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10               /* if(!EXC_RETURN[4]) */
    IT      EQ
    VSTMDBEQ r1!, {d8 - d15}        /* push FPU registers s16~s31 */
#endif

    STMFD   r1!, {r4 - r11}         /* push r4 - r11 registers */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00               /* flag = 0 */

    TST     lr, #0x10               /* if(!EXC_RETURN[4]) */
    IT      EQ
    MOVEQ   r4, #0x01               /* flag = 1 */

    STMFD   r1!, {r4}               /* push flag */
#endif

    LDR     r0, [r0]
    STR     r1, [r0]                /* update from thread stack pointer */
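
    /*
     * Layout of the "from" thread stack at this point, from the saved
     * stack pointer upward (lowest address first):
     *   flag                       (FPU build only: 1 if d8 - d15 follow)
     *   r4 - r11                   (pushed by the STMFD above)
     *   d8 - d15                   (FPU build only, when EXC_RETURN[4] == 0)
     *   r0 - r3, r12, lr, pc, psr  (stacked by hardware on exception entry)
     *   s0 - s15, FPSCR            (stacked by hardware when EXC_RETURN[4] == 0)
     * switch_to_thread below unwinds the same layout in reverse.
     */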

switch_to_thread:
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]                /* load thread stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD   r1!, {r3}               /* pop flag */
#endif

    LDMFD   r1!, {r4 - r11}         /* pop r4 - r11 registers */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    CMP     r3, #0                  /* if(flag_r3 != 0) */
    IT      NE
    VLDMIANE r1!, {d8 - d15}        /* pop FPU registers s16~s31 */
#endif

    MSR     psp, r1                 /* update stack pointer */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ORR     lr, lr, #0x10           /* lr |=  (1 << 4), clear FPCA. */
    CMP     r3, #0                  /* if(flag_r3 != 0) */
    IT      NE
    BICNE   lr, lr, #0x10           /* lr &= ~(1 << 4), set FPCA. */
#endif

pendsv_exit:
    /* restore interrupts */
    MSR     PRIMASK, r2

    ORR     lr, lr, #0x04
    BX      lr

/*
 * void rt_hw_context_switch_to(rt_uint32_t to);
 * r0 --> to
 */
.global rt_hw_context_switch_to
.type   rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
    LDR     r1, =rt_interrupt_to_thread
    STR     r0, [r1]

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    /* clear CONTROL.FPCA */
    MRS     r2, CONTROL             /* read */
    BIC     r2, #0x04               /* modify */
    MSR     CONTROL, r2             /* write-back */
#endif

    /* set from thread to 0 */
    LDR     r1, =rt_interrupt_from_thread
    MOV     r0, #0x0
    STR     r0, [r1]

    /* set interrupt flag to 1 */
    LDR     r1, =rt_thread_switch_interrupt_flag
    MOV     r0, #1
    STR     r0, [r1]

    /* set the PendSV and SysTick exception priority */
    LDR     r0, =NVIC_SYSPRI2
    LDR     r1, =NVIC_PENDSV_PRI
    LDR.W   r2, [r0, #0x00]         /* read */
    ORR     r1, r1, r2              /* modify */
    STR     r1, [r0]                /* write-back */

    LDR     r0, =NVIC_INT_CTRL      /* trigger the PendSV exception (causes context switch) */
    LDR     r1, =NVIC_PENDSVSET
    STR     r1, [r0]

    /* restore MSP */
    LDR     r0, =SCB_VTOR
    LDR     r0, [r0]
    LDR     r0, [r0]
    NOP
    MSR     msp, r0

    /* enable interrupts at processor level */
    CPSIE   F
    CPSIE   I

    /* clear the BASEPRI register to disable priority masking */
    MOV     r0, #0x00
    MSR     BASEPRI, r0

    /* ensure the PendSV exception takes place before subsequent operations */
    DSB
    ISB

    /* never reach here! */

/* kept for compatibility with older versions */
.global rt_hw_interrupt_thread_switch
.type   rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
    BX      lr
    NOP

.global HardFault_Handler
.type   HardFault_Handler, %function
HardFault_Handler:
    /* get current context */
    MRS     r0, msp                 /* get fault context from handler. */
    TST     lr, #0x04               /* if(!EXC_RETURN[2]) */
    BEQ     _get_sp_done
    MRS     r0, psp                 /* get fault context from thread. */
_get_sp_done:

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10               /* if(!EXC_RETURN[4]) */
    IT      EQ
    VSTMDBEQ r0!, {d8 - d15}        /* push FPU registers s16~s31 */
#endif

    STMFD   r0!, {r4 - r11}         /* push r4 - r11 registers */

#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00               /* flag = 0 */

    TST     lr, #0x10               /* if(!EXC_RETURN[4]) */
    IT      EQ
    MOVEQ   r4, #0x01               /* flag = 1 */
    STMFD   r0!, {r4}               /* push flag */
#endif

    STMFD   r0!, {lr}               /* push exc_return register */

    TST     lr, #0x04               /* if(!EXC_RETURN[2]) */
    BEQ     _update_msp
    MSR     psp, r0                 /* update stack pointer to PSP. */
    B       _update_done
_update_msp:
    MSR     msp, r0                 /* update stack pointer to MSP. */
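
    /*
     * On both paths r0 now points to the saved fault context:
     * exc_return, [flag], r4 - r11, then the hardware-stacked frame.
     * Per the AAPCS, r0 is passed as the first argument to the C
     * handler rt_hw_hard_fault_exception() called below.
     */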
_update_done:

    PUSH    {LR}
    BL      rt_hw_hard_fault_exception
    POP     {LR}

    ORR     lr, lr, #0x04
    BX      lr
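
/*
 * Usage sketch (C side), illustrating the intent of the weak
 * rt_hw_interrupt_disable()/rt_hw_interrupt_enable() pair above
 * (declared .weak so a BSP can override them for independent
 * interrupt management). They form a save/restore bracket, so
 * critical sections nest safely:
 *
 *     rt_base_t level;
 *
 *     level = rt_hw_interrupt_disable();  // save PRIMASK, then mask IRQs
 *     // critical section: cannot be preempted by maskable interrupts
 *     rt_hw_interrupt_enable(level);      // restore the previous PRIMASK
 */

/*@}*/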