/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 * 2018-11-22     Jesven       in the interrupt context, use rt_scheduler_do_irq_switch checks
 *                             and switches to a new thread
 * 2024-01-16     huanghe      restructure this code section following the aarch64 architectural style
 */

#include "rtconfig.h"

#define ARM_CPU_STACK_SIZE_OFFSET   12
#define ARM_CPU_STACK_SIZE  (1<<ARM_CPU_STACK_SIZE_OFFSET)
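/* each CPU mode gets a 4 KiB (1 << 12) stack per core; the stack areas are
 * reserved in the .bss section at the end of this file */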

.equ Mode_USR,        0x10
.equ Mode_FIQ,        0x11
.equ Mode_IRQ,        0x12
.equ Mode_SVC,        0x13
.equ Mode_ABT,        0x17
.equ Mode_UND,        0x1B
.equ Mode_SYS,        0x1F

.equ I_Bit,           0x80   /* when I bit is set, IRQ is disabled */
.equ F_Bit,           0x40   /* when F bit is set, FIQ is disabled */

/* Load the physical address of a symbol into a register.
   The _pvoff argument (pv_off) supplies the offset between the physical and
   virtual addresses. */
.macro get_phy, reg, symbol, _pvoff
    ldr \reg, =\symbol
    add \reg, \_pvoff
.endm
/* Calculate the offset between the physical and virtual addresses of _reset. */
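/* "ldr =_reset" yields the link-time (virtual) address, while "adr _reset" yields
   the current PC-relative (physical, pre-MMU) address; their difference is the
   physical-to-virtual offset. */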
.macro get_pvoff, tmp, out
    ldr     \tmp, =_reset
    adr     \out, _reset
    sub     \out, \out, \tmp
.endm

pv_off       .req r11 /* Used to store the offset between the physical and virtual addresses */
cpu_id       .req r10 /* Used to store the cpu id */

/* reset entry */
    .globl _reset
_reset:
    /* Calculate the offset between the physical address and the virtual address */
    get_pvoff r0, pv_off

    /* exit hyp mode */
    bl init_cpu_mode
    /* clear bss section */
    bl init_kernel_bss
    /* initialize the early stack used by this startup code */
    bl init_cpu_stack_early

    /* init mmu */
    b init_mmu_page_table_early
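    /* init_mmu_page_table_early does not return here: once the MMU is enabled it
     * branches to master_core_startup (or parks a secondary core in secondary_loop) */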

init_cpu_stack_early:

    cps #Mode_SVC

    get_phy r0, svc_stack_top, pv_off
    mov     sp, r0

#ifdef RT_USING_FPU
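    /* CPACR (c1, c0, 2): grant access to all coprocessors, including cp10/cp11
     * used by the VFP/NEON unit */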
    mov r4, #0xfffffff
    mcr p15, 0, r4, c1, c0, 2
#endif

    mov pc, lr

init_kernel_bss:

    /* enable I cache + branch prediction */
    mrc p15, 0, r0, c1, c0, 0
    orr     r0, r0, #(1<<12)
    orr     r0, r0, #(1<<11)
    mcr p15, 0, r0, c1, c0, 0

    mov r0, #0                  /* get a zero                       */
    get_phy r1, __bss_start, pv_off
    get_phy r2, __bss_end, pv_off

bss_loop:
    cmp r1, r2                  /* check if data to clear           */
    strlo r0, [r1], #4          /* clear 4 bytes                    */
    blo bss_loop                /* loop until done                  */

    mov pc, lr

init_cpu_mode:

#ifdef ARCH_ARMV8
    /* Check for HYP mode */
    mrs r0, cpsr_all
    and r0, r0, #0x1F
    mov r8, #0x1A
    cmp r0, r8
    beq overHyped
    b continue_exit

overHyped: /* Get out of HYP mode */
    mov r9, lr
    /* HYP mode has a dedicated register, ELR_hyp, to hold the exception return
       address. lr must be saved temporarily, otherwise "mov pc, lr" cannot be
       used after switching modes. */
    adr r1, continue_exit
    msr ELR_hyp, r1
    mrs r1, cpsr_all
    and r1, r1, #0xFFFFFFE0    /* CPSR_MODE_MASK */
    orr r1, r1, #0x13          /* CPSR_MODE_SUPERVISOR */
    msr SPSR_hyp, r1
    eret

continue_exit:
    mov lr, r9

#endif
#ifdef SOC_BCM283x
    /* Suspend the other cpu cores */
    mrc p15, 0, r0, c0, c0, 5
    ands r0, #3
    bne _halt

    /* Disable IRQ & FIQ */
    cpsid if

    /* Check for HYP mode */
    mrs r0, cpsr_all
    and r0, r0, #0x1F
    mov r8, #0x1A
    cmp r0, r8
    beq overHyped
    b continue_exit

overHyped: /* Get out of HYP mode */
    mov r9, lr
    /* HYP mode has a dedicated register, ELR_hyp, to hold the exception return
       address. lr must be saved temporarily, otherwise "mov pc, lr" cannot be
       used after switching modes. */
    adr r1, continue_exit
    msr ELR_hyp, r1
    mrs r1, cpsr_all
    and r1, r1, #0xFFFFFFE0    /* CPSR_MODE_MASK */
    orr r1, r1, #0x13          /* CPSR_MODE_SUPERVISOR */
    msr SPSR_hyp, r1
    eret

continue_exit:
    mov lr, r9
    /* set the cpu to SVC32 mode and disable interrupts */
    mrs r0, cpsr
    bic r0, r0, #0x1f
    orr r0, r0, #0x13
    msr cpsr_c, r0

#endif

    /* make sure the MMU is disabled, then invalidate the TLB, I-cache and
     * branch predictor before the MMU is enabled later */
    mrc p15, 0, r0, c1, c0, 0
    bic r0, #1
    mcr p15, 0, r0, c1, c0, 0
    dsb
    isb

    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0    /* tlbiall */
    mcr p15, 0, r0, c7, c5, 0    /* iciallu */
    mcr p15, 0, r0, c7, c5, 6    /* bpiall */
    dsb
    isb

    mov pc, lr

init_mmu_page_table_early:
    get_phy r0, init_mtbl, pv_off
    mov r1, pv_off
    bl rt_hw_mem_setup_early

    /* get cpu id */
    bl rt_hw_cpu_id_early
    mov cpu_id, r0
    /* set lr so that enable_mmu_page_table_early returns to master_core_startup */
    ldr     lr, =master_core_startup

    cmp cpu_id, #0
    beq enable_mmu_page_table_early


#ifdef RT_USING_SMP
#ifdef RT_SMP_AUTO_BOOT
    /* if cpu id > 0, stop or wait */
    ldr r0, =secondary_cpu_entry
    mov r1, #0
    str r1, [r0] /* clean secondary_cpu_entry */
#endif
#endif

secondary_loop:
    @ secondary cores sleep here until core 0 wakes them up
    wfe
#ifdef RT_SMP_AUTO_BOOT
    ldr r1, =secondary_cpu_entry
    ldr r0, [r1]
    cmp r0, #0
    blxne r0 /* if(secondary_cpu_entry) secondary_cpu_entry(); */
#endif /* RT_SMP_AUTO_BOOT */
    b secondary_loop

enable_mmu_page_table_early:
    /* init TTBR0 */
    get_phy r0, init_mtbl, pv_off
    mcr     p15, #0, r0, c2, c0, #0
    dmb

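    /* DACR: mark all 16 domains as "client" (0b01) so the access permission
     * bits in the translation tables are enforced */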
    ldr     r0, =#0x55555555
    mcr     p15, #0, r0, c3, c0, #0

    /* disable ttbr1 */
    mov r0, #(1 << 5)            /* PD1=1 */
    mcr p15, 0, r0, c2, c0, 2    /* ttbcr */


    /* init the stacks for each CPU mode */
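    /* each core gets its own slot: sp = <mode>_stack_top - cpu_id * ARM_CPU_STACK_SIZE */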
    cps #Mode_UND
    ldr r1, =und_stack_top
    sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET

    cps #Mode_IRQ
    ldr r1, =irq_stack_top
    sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET

    cps #Mode_FIQ
    ldr r1, =irq_stack_top
    sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET

    cps #Mode_ABT
    ldr r1, =abt_stack_top
    sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET

    cps #Mode_SVC
    ldr r1, =svc_stack_top
    sub sp, r1, cpu_id, asl #ARM_CPU_STACK_SIZE_OFFSET

    /* invalidate the TLB, I-cache and branch predictor before enabling the MMU */
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0    /* tlbiall */
    mcr p15, 0, r0, c7, c5, 0    /* iciallu */
    mcr p15, 0, r0, c7, c5, 6    /* bpiall */

    mrc p15, 0, r0, c1, c0, 0
    bic r0, r0, #0x7                    /* clear bits 0-2 (MMU, alignment check, data cache) */
    orr r0, #((1 << 12) | (1 << 11))    /* instruction cache, branch prediction */
    orr r0, #((1 << 2) | (1 << 0))      /* data cache, mmu enable */
    mcr p15, 0, r0, c1, c0, 0
    dsb
    isb

    mov pc, lr

master_core_startup:
    mov r0, pv_off
    bl rt_kmem_pvoff_set

    ldr     lr, =rtthread_startup
    mov pc, lr

.global rt_hw_mmu_tbl_get
rt_hw_mmu_tbl_get:
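    /* read TTBR0 and mask off the cacheability attribute bits (0x18, RGN) to
     * return the translation table base address */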
    mrc p15, 0, r0, c2, c0, 0    /* ttbr0 */
    bic r0, #0x18
    mov pc, lr

.weak rt_hw_cpu_id_early
rt_hw_cpu_id_early:
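    /* MPIDR: on single-cluster SoCs the low 4 bits (Aff0) give the CPU id */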
    mrc p15, 0, r0, c0, c0, 5
    and r0, r0, #0xf
    mov pc, lr

#ifdef RT_USING_SMP
.global rt_secondary_cpu_entry
rt_secondary_cpu_entry:
    ldr r0, =_reset
    adr pv_off, _reset
    sub pv_off, pv_off, r0

    bl init_cpu_stack_early

    /* get cpu id, then enable the mmu */
    bl rt_hw_cpu_id_early
    mov cpu_id, r0

    ldr lr, =rt_hw_secondary_cpu_bsp_start
    b enable_mmu_page_table_early
#endif


/* exception handlers: undef, swi, pabt, dabt, resv, irq, fiq */
.section .text.isr, "ax"
    .align  5
.globl vector_fiq
vector_fiq:
    stmfd   sp!,{r0-r7,lr}
    bl      rt_hw_trap_fiq
    ldmfd   sp!,{r0-r7,lr}
    subs    pc, lr, #4

.globl      rt_interrupt_enter
.globl      rt_interrupt_leave
.globl      rt_thread_switch_interrupt_flag
.globl      rt_interrupt_from_thread
.globl      rt_interrupt_to_thread

.globl      rt_current_thread
.globl      vmm_thread
.globl      vmm_virq_check

    .align  5
.globl vector_irq
vector_irq:
#ifdef RT_USING_SMP
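    /* Save the interrupted context onto the task's SVC stack (not the IRQ stack),
     * so the scheduler may switch to another thread from within this IRQ. */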
    stmfd   sp!, {r0, r1}
    cps     #Mode_SVC
    mov     r0, sp          /* svc_sp */
    mov     r1, lr          /* svc_lr */

    cps     #Mode_IRQ
    sub     lr, #4
    stmfd   r0!, {r1, lr}     /* svc_lr, svc_pc */
    stmfd   r0!, {r2 - r12}
    ldmfd   sp!, {r1, r2}     /* original r0, r1 */
    stmfd   r0!, {r1 - r2}
    mrs     r1,  spsr         /* original mode */
    stmfd   r0!, {r1}

#ifdef RT_USING_SMART
    stmfd   r0, {r13, r14}^   /* usr_sp, usr_lr */
    sub     r0, #8
#endif
#ifdef RT_USING_FPU
    /* fpu context */
    vmrs r6, fpexc
    tst  r6, #(1<<30)
    beq 1f
    vstmdb r0!, {d0-d15}
    vstmdb r0!, {d16-d31}
    vmrs r5, fpscr
    stmfd r0!, {r5}
1:
    stmfd r0!, {r6}
#endif

    /* now irq stack is clean */
    /* r0 is task svc_sp */
    /* backup r0 -> r8 */
    mov r8, r0

    cps     #Mode_SVC
    mov     sp, r8

    bl      rt_interrupt_enter
    bl      rt_hw_trap_irq
    bl      rt_interrupt_leave

    mov     r0, r8
    bl      rt_scheduler_do_irq_switch

    b       rt_hw_context_switch_exit

#else
    stmfd   sp!, {r0-r12,lr}

    bl      rt_interrupt_enter
    bl      rt_hw_trap_irq
    bl      rt_interrupt_leave

    /* if rt_thread_switch_interrupt_flag is set, jump to
     * rt_hw_context_switch_interrupt_do and don't return */
    ldr     r0, =rt_thread_switch_interrupt_flag
    ldr     r1, [r0]
    cmp     r1, #1
    beq     rt_hw_context_switch_interrupt_do

#ifdef RT_USING_SMART
    ldmfd   sp!, {r0-r12,lr}
    cps     #Mode_SVC
    push    {r0-r12}
    mov     r7, lr
    cps     #Mode_IRQ
    mrs     r4, spsr
    sub     r5, lr, #4
    cps     #Mode_SVC
    and     r6, r4, #0x1f
    cmp     r6, #0x10
    bne     1f
    msr     spsr_csxf, r4
    mov     lr, r5
    pop     {r0-r12}
    b       arch_ret_to_user
1:
    mov     lr, r7
    cps     #Mode_IRQ
    msr     spsr_csxf, r4
    mov     lr, r5
    cps     #Mode_SVC
    pop     {r0-r12}
    cps     #Mode_IRQ
    movs    pc, lr
#else
    ldmfd   sp!, {r0-r12,lr}
    subs    pc,  lr, #4
#endif

rt_hw_context_switch_interrupt_do:
    mov     r1,  #0             /* clear flag */
    str     r1,  [r0]

    mov     r1, sp              /* r1 points to {r0-r3} on the IRQ stack */
    add     sp, sp, #4*4
    ldmfd   sp!, {r4-r12,lr}    /* reload saved registers */
    mrs     r0,  spsr           /* get cpsr of the interrupted thread */
    sub     r2,  lr, #4         /* save old task's pc to r2 */

    /* Switch to SVC mode with interrupts disabled. If a usr-mode task was
     * interrupted, this just switches to the kernel-space stack, so saving
     * the registers there won't trigger a data abort. */
    msr     cpsr_c, #I_Bit|F_Bit|Mode_SVC

    stmfd   sp!, {r2}           /* push old task's pc */
    stmfd   sp!, {r4-r12,lr}    /* push old task's lr, r12-r4 */
    ldmfd   r1,  {r1-r4}        /* restore r0-r3 of the interrupted thread */
    stmfd   sp!, {r1-r4}        /* push old task's r0-r3 */
    stmfd   sp!, {r0}           /* push old task's cpsr */

#ifdef RT_USING_SMART
    stmfd   sp, {r13, r14}^     /* push usr_sp, usr_lr */
    sub     sp, #8
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    vmrs r6, fpexc
    tst  r6, #(1<<30)
    beq 1f
    vstmdb sp!, {d0-d15}
    vstmdb sp!, {d16-d31}
    vmrs r5, fpscr
    stmfd sp!, {r5}
1:
    stmfd sp!, {r6}
#endif

    ldr     r4,  =rt_interrupt_from_thread
    ldr     r5,  [r4]
    str     sp,  [r5]       /* store sp in the preempted task's TCB */

    ldr     r6,  =rt_interrupt_to_thread
    ldr     r6,  [r6]
    ldr     sp,  [r6]       /* get new task's stack pointer */

#ifdef RT_USING_SMART
    bl      rt_thread_self
    mov     r4, r0
    bl      lwp_aspace_switch
    mov     r0, r4
    bl      lwp_user_setting_restore
#endif

#ifdef RT_USING_FPU
    /* fpu context */
    ldmfd sp!, {r6}
    vmsr fpexc, r6
    tst  r6, #(1<<30)
    beq 1f
    ldmfd sp!, {r5}
    vmsr fpscr, r5
    vldmia sp!, {d16-d31}
    vldmia sp!, {d0-d15}
1:
#endif

#ifdef RT_USING_SMART
    ldmfd sp, {r13, r14}^    /* pop usr_sp, usr_lr */
    add sp, #8
#endif

    ldmfd   sp!, {r4}        /* pop new task's cpsr to spsr */
    msr     spsr_cxsf, r4

#ifdef RT_USING_SMART
    and     r4, #0x1f
    cmp     r4, #0x10
    bne     1f
    ldmfd   sp!, {r0-r12,lr}
    ldmfd   sp!, {lr}
    b       arch_ret_to_user
1:
#endif
    /* pop new task's r0-r12, lr & pc, copy spsr to cpsr */
    ldmfd   sp!, {r0-r12,lr,pc}^

#endif

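/* Build a struct rt_hw_exp_stack frame (17 words: r0-r12, sp, lr, pc, cpsr)
 * below the current SVC stack pointer and leave r0 pointing at it; the caller's
 * sp/lr are taken from SYS mode for user-mode traps, or from SVC mode otherwise. */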
.macro push_svc_reg
    sub     sp, sp, #17 * 4         /* Sizeof(struct rt_hw_exp_stack)  */
    stmia   sp, {r0 - r12}          /* Calling r0-r12                  */
    mov     r0, sp
    add     sp, sp, #17 * 4
    mrs     r6, spsr                /* Save CPSR                       */
    str     lr, [r0, #15*4]         /* Push PC                         */
    str     r6, [r0, #16*4]         /* Push CPSR                       */
    and     r1, r6, #0x1f
    cmp     r1, #0x10
    cps     #Mode_SYS
    streq   sp, [r0, #13*4]         /* Save calling SP                 */
    streq   lr, [r0, #14*4]         /* Save calling LR                 */
    cps     #Mode_SVC
    strne   sp, [r0, #13*4]         /* Save calling SP                 */
    strne   lr, [r0, #14*4]         /* Save calling LR                 */
.endm

    .align  5
.weak vector_swi
vector_swi:
    push_svc_reg
    bl      rt_hw_trap_swi
    b       .

    .align  5
    .globl  vector_undef
vector_undef:
    push_svc_reg
    bl      rt_hw_trap_undef
#ifdef RT_USING_FPU
    cps     #Mode_UND
    sub     sp, sp, #17 * 4
    ldr     lr, [sp, #15*4]
    ldmia   sp, {r0 - r12}
    add     sp, sp, #17 * 4
    movs    pc, lr
#endif
    b       .

    .align  5
    .globl  vector_pabt
vector_pabt:
    push_svc_reg
#ifdef RT_USING_SMART
    /* copy the Mode_ABT stack frame to the SVC stack */
    sub     sp, sp, #17 * 4     /* Sizeof(struct rt_hw_exp_stack)  */
    mov     lr, r0
    ldmia   lr, {r0 - r12}
    stmia   sp, {r0 - r12}
    add     r1, lr, #13 * 4
    add     r2, sp, #13 * 4
    ldmia   r1, {r4 - r7}
    stmia   r2, {r4 - r7}
    mov     r0, sp
    bl      rt_hw_trap_pabt
    /* return to user */
    ldr     lr, [sp, #16*4]     /* original spsr */
    msr     spsr_cxsf, lr
    ldr     lr, [sp, #15*4]     /* original pc */
    ldmia   sp, {r0 - r12}
    add     sp, #17 * 4
    b       arch_ret_to_user
#else
    bl      rt_hw_trap_pabt
    b       .
#endif

    .align  5
    .globl  vector_dabt
vector_dabt:
    push_svc_reg
#ifdef RT_USING_SMART
    /* copy the Mode_ABT stack frame to the SVC stack */
    sub     sp, sp, #17 * 4    /* Sizeof(struct rt_hw_exp_stack)  */
    mov     lr, r0
    ldmia   lr, {r0 - r12}
    stmia   sp, {r0 - r12}
    add     r1, lr, #13 * 4
    add     r2, sp, #13 * 4
    ldmia   r1, {r4 - r7}
    stmia   r2, {r4 - r7}
    mov     r0, sp
    bl      rt_hw_trap_dabt
    /* return to user */
    ldr     lr, [sp, #16*4]    /* original spsr */
    msr     spsr_cxsf, lr
    ldr     lr, [sp, #15*4]    /* original pc */
    ldmia   sp, {r0 - r12}
    add     sp, #17 * 4
    b       arch_ret_to_user
#else
    bl      rt_hw_trap_dabt
    b       .
#endif

    .align  5
    .globl  vector_resv
vector_resv:
    push_svc_reg
    bl      rt_hw_trap_resv
    b       .

.global rt_hw_clz
rt_hw_clz:
    clz r0, r0
    bx lr


#include "asm-generic.h"

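/* Thread entry trampoline: save lr (the thread-exit routine prepared by the thread
 * stack setup) in r10, call the entry function held in r1, then invoke the saved
 * exit routine once the entry returns. */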
START_POINT(_thread_start)
    mov     r10, lr
    blx     r1
    blx     r10
    b       .   /* never here */
START_POINT_END(_thread_start)

.data
.align 14
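/* the L1 translation table must be aligned to 16 KiB (2^14) */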
init_mtbl:
    .space  (4*4096) /* the L1 translation table contains 4096 32-bit (word-sized) entries */

.global rt_hw_mmu_switch
rt_hw_mmu_switch:
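    /* set the table-walk cacheability attribute bits (0x18, RGN) in the new TTBR0
     * value, install it, then invalidate the TLB, I-cache and branch predictor */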
    orr r0, #0x18
    mcr p15, 0, r0, c2, c0, 0       /* ttbr0 */

    /* invalidate tlb */
    mov r0, #0
    mcr p15, 0, r0, c8, c7, 0       /* tlbiall */
    mcr p15, 0, r0, c7, c5, 0       /* iciallu */
    mcr p15, 0, r0, c7, c5, 6       /* bpiall */

    dsb
    isb
    mov pc, lr


.global rt_hw_set_process_id
rt_hw_set_process_id:
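    /* CONTEXTIDR (c13, c0, 1): the process id goes into PROCID (bits [31:8]),
     * leaving the ASID field (bits [7:0]) at zero */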
    lsl r0, r0, #8
    mcr p15, 0, r0, c13, c0, 1
    mov pc, lr

.bss
.align 3     /* align to 2^3 = 8 bytes */

.cpus_stack:
svc_stack_n:
#if defined(RT_USING_SMP) && (RT_CPUS_NR > 1)
    .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE)
#endif
    .space (ARM_CPU_STACK_SIZE)
svc_stack_top:

irq_stack_n:
#if defined(RT_USING_SMP) && (RT_CPUS_NR > 1)
    .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE)
#endif
    .space (ARM_CPU_STACK_SIZE)
irq_stack_top:

und_stack_n:
#if defined(RT_USING_SMP) && (RT_CPUS_NR > 1)
    .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE)
#endif
    .space (ARM_CPU_STACK_SIZE)
und_stack_top:

abt_stack_n:
#if defined(RT_USING_SMP) && (RT_CPUS_NR > 1)
    .space ((RT_CPUS_NR - 1) * ARM_CPU_STACK_SIZE)
#endif
    .space (ARM_CPU_STACK_SIZE)
abt_stack_top: