1/*
2 * Copyright (c) 2015 Travis Geiselbrecht
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8#include <lk/asm.h>
9#include <arch/defines.h>
10#include <arch/riscv/asm.h>
11#include <arch/riscv/mmu.h>
12#include "config.h"
13
.section ".text.boot"
FUNCTION(_start)
.option push
.option norelax
    // set the global pointer; linker relaxation must be disabled here so this
    // sequence is not itself relaxed into a gp-relative reference to the
    // not-yet-initialized gp
    lla     gp, __global_pointer$
.option pop

#if RISCV_M_MODE
    // copy the hart id into a0 which we'll use later
    // supervisor mode should already have hart id in a0
    csrr    a0, mhartid
#endif

    // cpu lottery: whoever sets this first gets to be cpu 0
    // amoadd.w atomically fetches the previous counter value into a2 and
    // increments it, so every hart gets a unique logical cpu number
    lla     t0, _boot_lottery
    li      t1, 1
    amoadd.w a2, t1, (t0)

    // a2 now holds the logical cpu number. a2 is used because it is
    // the first unused argument register on SBI based systems,
    // which seem to use a0 and a1.

    // if this cpu is out of range, trap it
    li      t0, SMP_MAX_CPUS
    ble     t0, a2, hart_trap

    // set the default stack per cpu
    lla     sp, default_stack_top
    // default stack locations for each cpu:
    // LOW ------------ HIGH
    // [cpu2][cpu1][cpu0]
    li      t1, ARCH_DEFAULT_STACK_SIZE
    mul     t1, t1, a2
    sub     sp, sp, t1

    // if we aren't cpu 0, go hang out in secondary cpu purgatory for now
    bne     a2, zero, secondary_trap

#if ARCH_RISCV_TWOSEGMENT
    // copy preinitialized data from flash to memory, one XLEN word at a time
    // NOTE(review): assumes .data is nonempty and a whole multiple of the
    // word size; an empty region would copy one stray word — confirm against
    // the linker script's alignment guarantees
    lla     t0, __data_start_rom
    lla     t1, __data_start
    lla     t2, __data_end
    beq     t0, t1, 1f      // rom and ram addresses match: already in place

0:
    LDR     t3, (t0)
    STR     t3, (t1)
    add     t0, t0, RISCV_XLEN_BYTES
    add     t1, t1, RISCV_XLEN_BYTES
    bne     t1, t2, 0b
#endif

    // zero bss, one XLEN-sized word at a time
1:
    lla     t0, __bss_start
    lla     t1, __bss_end
    beq     t0, t1, 1f      // empty bss: skip the loop entirely
0:
    STR     zero, (t0)
    add     t0, t0, RISCV_XLEN_BYTES
    bne     t0, t1, 0b
1:

#if WITH_SMP
    // Save a copy of _start in physical space. This is later used
    // as the entry point for secondary cpus.
    lla     t0, _start
    lla     t1, _start_physical
    STR     t0, (t1)
#endif

#if RISCV_MMU
    // build the boot page tables and switch to the kernel's high virtual
    // address space; returns here running at the high address with
    // gp/sp/ra already fixed up by _mmu_init
    jal     _mmu_init
#endif

#if WITH_SMP
    // Release any other harts into riscv_secondary_entry.
    // The store fence orders all prior stores (.data copy, bss zeroing,
    // page tables) before the _boot_status flag becomes visible.
    fence   w, w
    lla     t1, _boot_status
    li      t0, 1
    sb      t0, (t1)
    fence
#endif

    // call into early C code to set up the percpu structure;
    // stash a0-a3 in callee-saved regs so the boot arguments
    // (a0 = hart id, a2 = logical cpu number; a1/a3 passed through
    // from the firmware) survive the call and reach lk_main intact
    mv      s0, a0
    mv      s1, a1
    mv      s2, a2
    mv      s3, a3
    jal     riscv_configure_percpu_early
    mv      a0, s0
    mv      a1, s1
    mv      a2, s2
    mv      a3, s3

    // call main
    jal     lk_main

    // should never return here
    j       .
END_FUNCTION(_start)
117
LOCAL_FUNCTION(secondary_trap)
#if WITH_SMP
    // Spin until _boot_status becomes nonzero — cpu 0 sets it (with a fence)
    // once .data/.bss and the boot page tables are ready — then fall into the
    // secondary boot path. The flag byte is re-read every iteration.
    lla     t5, _boot_status
    lb      t0, (t5)
    beqz    t0, secondary_trap

    // we've been released by the main cpu and/or we've been booted after the
    // system has been running a while.

#if RISCV_MMU
    // enable the mmu on this core, using the page tables cpu 0 already built
    // (.Lenable_mmu also rebases gp/sp/ra into the high address space)
    jal     .Lenable_mmu
#endif

    // a0 == hart id
    // a2 == assigned cpu id (may not be the same)

    // set the per cpu structure before getting into the secondary boot path
    jal     riscv_configure_percpu_early

    // bootstrap the secondary cpus
    jal     riscv_secondary_entry
#else
    // no SMP support: park any secondary hart forever
    j       hart_trap
#endif
END_FUNCTION(secondary_trap)
145
LOCAL_FUNCTION(hart_trap)
    // Parking spot for harts we cannot use (hart id >= SMP_MAX_CPUS, or any
    // secondary hart on non-SMP builds): sit in a low-power wait loop forever.
.Lpark:
    wfi
    j       .Lpark
END_FUNCTION(hart_trap)
151
#if RISCV_MMU
    // initialize the kernel page tables
    // for all MMU versions, identity map some amount of memory near 0 and
    // the same amount at the bottom of the kernel's address space
LOCAL_FUNCTION(_mmu_init)
    lla     t0, trampoline_pgtable

    // store the physical address of the pgtable for future use
    // (sd is RV64-only; the sv32 path below is #error'd out anyway)
    lla     t1, trampoline_pgtable_phys
    sd      t0, (t1)

    // do the same for the main kernel pgtable
    lla     t2, kernel_pgtable
    lla     t1, kernel_pgtable_phys
    sd      t2, (t1)

    // and the 2nd level tables
    lla     t2, kernel_l2_pgtable
    lla     t1, kernel_l2_pgtable_phys
    sd      t2, (t1)

    // compute kernel pgtable pointer (index 256)
    // split into two addi steps because 8 * 256 = 2048 does not fit in a
    // signed 12-bit immediate (max 2047)
    addi    t1, t0, (8 * 128)
    addi    t1, t1, (8 * 128)

    // page table entry: address 0, A, D, G, XWR, V
    li      t2, (0 | (1<<7) | (1<<6) | (1<<5) | (1<<3) | (1<<2) | (1<<1) | (1<<0))

    // num interations and increment count
#if RISCV_MMU == 48 || RISCV_MMU == 39
    // RV48: map the first 512GB of the physical address space at the
    // bottom of the kernel address space using a single terapage
    // RV39: map the first 64GB of the physical address space at the
    // bottom of the kernel address space using 64 1GB gigapages
    li      t3, RISCV_MMU_PHYSMAP_PAGE_COUNT
    // per-entry PPN increment: page size shifted into the PTE's PPN field
    // (PTE holds the physical address >> 12, stored at bit 10, hence >> 2)
    li      t4, (RISCV_MMU_PHYSMAP_PAGE_SIZE >> 2)
#else
#error implement sv32
#endif

    // loop, writing t3 entries out and incrementing by t4 address.
    // write both to t0 (index 0 of the kernel page table) and
    // t1 (starting index of kernel space)
0:
    sd      t2, (t1)
    sd      t2, (t0)
    add     t2, t2, t4
    addi    t0, t0, 8
    addi    t1, t1, 8
    addi    t3, t3, -1
    bnez    t3, 0b

    // ensure it's written out
    fence   w,w

// Entered directly by secondary cpus (via jal from secondary_trap) once the
// tables above have already been built by cpu 0.
.Lenable_mmu:
    // set the satp register and enable the mmu
    // ASID 0, trampoline_pgtable address (PPN = physical address >> 12)
    lla     t0, trampoline_pgtable
    srli    t1, t0, 12
#if RISCV_MMU == 48
    li      t2, (RISCV_SATP_MODE_SV48 << RISCV_SATP_MODE_SHIFT)
#elif RISCV_MMU == 39
    li      t2, (RISCV_SATP_MODE_SV39 << RISCV_SATP_MODE_SHIFT)
#else
#error implement
#endif
    or      t1, t1, t2
    csrw    satp, t1

    // global tlb fence
    sfence.vma  zero, zero

    // mmu is initialized and we're running out of an identity physical map

    // save the physical address of .Lhigh
    lla     t1, .Lhigh

    // bounce to the high address: load the link-time (virtual) address of
    // .Lhigh from a literal pool and jump to it
    lla     t0, .Lhigh_addr
    ld      t0, (t0)
    jr      t0

    // the full virtual address of the .Lhigh label
.Lhigh_addr:
    .quad   .Lhigh
.Lhigh:

    // we're now running at the high virtual address
    // compute the delta between the old physical and newer high addresses
    sub     t0, t0, t1

    // fix up the gp, stack pointer, and return address so the caller
    // resumes execution in the high virtual address space
    add     gp, gp, t0
    add     sp, sp, t0
    add     ra, ra, t0
    ret
END_FUNCTION(_mmu_init)
#endif // RISCV_MMU
251
.bss
.align 4
// boot-time stacks: one ARCH_DEFAULT_STACK_SIZE slice per cpu, carved
// downward from default_stack_top (cpu0 gets the highest slice; see _start)
LOCAL_DATA(default_stack)
    .skip ARCH_DEFAULT_STACK_SIZE * SMP_MAX_CPUS
LOCAL_DATA(default_stack_top)

// put boot status in .data so it doesn't get paved over during BSS initialization
LOCAL_DATA(_boot_status)
    .byte  0

// 4-byte alignment: the boot lottery word is the target of amoadd.w in _start
.align 2
LOCAL_DATA(_boot_lottery)
    .word  0
266