1 /*
2 * Copyright (c) 2024 Travis Geiselbrecht
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8 #include <lib/fdtwalk.h>
9
10 #include <inttypes.h>
11 #include <assert.h>
12 #include <libfdt.h>
13 #include <lk/cpp.h>
14 #include <lk/err.h>
15 #include <lk/trace.h>
16 #include <stdio.h>
17 #include <sys/types.h>
18 #if WITH_KERNEL_VM
19 #include <kernel/vm.h>
20 #else
21 #include <kernel/novm.h>
22 #endif
23 #if ARCH_RISCV
24 #include <arch/riscv.h>
25 #include <arch/riscv/feature.h>
26 #endif
27 #if ARCH_ARM || ARCH_ARM64
28 #include <dev/power/psci.h>
29 #endif
30 #if WITH_DEV_BUS_PCI
31 #include <dev/bus/pci.h>
32 #endif
33
34 // A few helper routines to configure subsystems such as cpu, memory, and pci busses based
35 // on the information in a device tree.
36 //
37 // May eventually move some of these to other locations, but for now dump the helpers here.
38
39 #define LOCAL_TRACE 0
40
fdtwalk_reserve_fdt_memory(const void * fdt,paddr_t fdt_phys)41 status_t fdtwalk_reserve_fdt_memory(const void *fdt, paddr_t fdt_phys) {
42 if (fdt_check_header(fdt) != 0) {
43 return ERR_NOT_FOUND;
44 }
45
46 uint32_t length = fdt_totalsize(fdt);
47
48 paddr_t base = fdt_phys;
49 base = PAGE_ALIGN(base);
50 length = ROUNDUP(length, PAGE_SIZE);
51
52 dprintf(INFO, "FDT: reserving physical range for FDT: [%#lx, %#lx]\n", base, base + length - 1);
53
54 #if WITH_KERNEL_VM
55 struct list_node list = LIST_INITIAL_VALUE(list);
56 pmm_alloc_range(base, length / PAGE_SIZE, &list);
57 #else
58 novm_alloc_specific_pages((void *)base, length / PAGE_SIZE);
59 #endif
60 return NO_ERROR;
61 }
62
// Discover memory banks and reserved ranges from the FDT and register them
// with the pmm (or novm) layer. Falls back to the caller-supplied default
// region when the FDT describes no memory. Finally reserves the pages the
// FDT blob itself occupies so they are not handed out as free memory.
//
// fdt              - pointer to the device tree blob
// fdt_phys         - physical address of the blob (used for self-reservation)
// default_mem_base - base of the fallback memory region
// default_mem_size - size of the fallback memory region
//
// Always returns NO_ERROR; per-bank failures are logged, not propagated.
status_t fdtwalk_setup_memory(const void *fdt, paddr_t fdt_phys, paddr_t default_mem_base, size_t default_mem_size) {
#if WITH_KERNEL_VM
    // TODO: consider having pmm make a copy so that this doesn't have to be static
    // pmm_add_arena() keeps a pointer to each arena struct, so these must
    // outlive this function — hence static storage.
    static pmm_arena_t arenas[8];
#endif

    // in/out: counts pass in the array capacity and come back holding the
    // number of regions actually found
    struct fdt_walk_memory_region mem[8];
    struct fdt_walk_memory_region reserved_mem[16];
    size_t mem_count = countof(mem);
    size_t reserved_mem_count = countof(reserved_mem);

    // find memory from the FDT
    status_t err = fdt_walk_find_memory(fdt, mem, &mem_count, reserved_mem, &reserved_mem_count);
    if (err < NO_ERROR || mem_count == 0) {
        /* add a default memory region if we didn't find it in the FDT */
        dprintf(INFO, "FDT: could not find memory, using default base %#lx size %#zx\n", default_mem_base, default_mem_size);
#if WITH_KERNEL_VM
        // NOTE(review): the fallback region is only installed in the VM build;
        // the novm build adds no arena in this path — confirm that is intended.
        mem[0].base = default_mem_base;
        mem[0].len = default_mem_size;
        mem_count = 1;
#endif
    }

    for (size_t i = 0; i < mem_count; i++) {
        LTRACEF("base %#llx len %#llx\n", mem[i].base, mem[i].len);
        dprintf(INFO, "FDT: found memory bank range [%#llx, %#llx] (length %#llx)\n", mem[i].base, mem[i].base + mem[i].len - 1, mem[i].len);

        /* trim size on certain platforms */
#if ARCH_ARM || (ARCH_RISCV && __riscv_xlen == 32)
        /* only use the first 1GB on ARM32 */
        const auto GB = 1024*1024*1024UL;
        // bank starts entirely beyond the first GB past MEMBASE: skip it
        if (mem[i].base - MEMBASE > GB) {
            dprintf(INFO, "trimming memory to 1GB\n");
            continue;
        }
        // bank straddles the 1GB boundary: clip its length to end at MEMBASE + GB
        if (mem[i].base - MEMBASE + mem[i].len > GB) {
            dprintf(INFO, "trimming memory to 1GB\n");
            mem[i].len = MEMBASE + GB - mem[i].base;
            dprintf(INFO, "range is now [%#llx, %#llx]\n", mem[i].base, mem[i].base + mem[i].len - 1);
        }
#endif

#if WITH_KERNEL_VM
        if (i >= countof(arenas)) {
            printf("FDT: found too many arenas, max is %zu\n", countof(arenas));
            break;
        }

        /* add a vm arena */
        arenas[i].name = "fdt";
        arenas[i].base = mem[i].base;
        arenas[i].size = mem[i].len;
        arenas[i].flags = PMM_ARENA_FLAG_KMAP;
        pmm_add_arena(&arenas[i]);
#else
        novm_add_arena("fdt", mem[i].base, mem[i].len);
#endif
    }

    /* reserve memory described by the FDT */
    // NOTE(review): len / PAGE_SIZE rounds down — a reserved range whose
    // length is not page aligned leaves its tail page unreserved; confirm
    // the walker always reports page-aligned lengths.
    for (size_t i = 0; i < reserved_mem_count; i++) {
        dprintf(INFO, "FDT: reserving memory range [%#llx, %#llx]\n",
                reserved_mem[i].base, reserved_mem[i].base + reserved_mem[i].len - 1);

#if WITH_KERNEL_VM
        // allocate the range and place on a list
        struct list_node list = LIST_INITIAL_VALUE(list);
        pmm_alloc_range(reserved_mem[i].base, reserved_mem[i].len / PAGE_SIZE, &list);
#else
        novm_alloc_specific_pages((void *)reserved_mem[i].base, reserved_mem[i].len / PAGE_SIZE);
#endif
    }

    // TODO: deal with fdt reserved memory sections with
    // fdt_num_mem_rsv and
    // fdt_get_mem_rsv

    // reserve the memory the device tree itself uses
    fdtwalk_reserve_fdt_memory(fdt, fdt_phys);

    return NO_ERROR;
}
145
146 #if ARCH_RISCV
fdtwalk_setup_cpus_riscv(const void * fdt)147 status_t fdtwalk_setup_cpus_riscv(const void *fdt) {
148 #if WITH_SMP
149 struct fdt_walk_cpu_info cpus[SMP_MAX_CPUS];
150 size_t cpu_count = countof(cpus);
151
152 status_t err = fdt_walk_find_cpus(fdt, cpus, &cpu_count);
153 if (err >= NO_ERROR) {
154 const char *isa_string = {};
155
156 if (cpu_count > 0) {
157 dprintf(INFO, "FDT: found %zu cpu%c\n", cpu_count, cpu_count == 1 ? ' ' : 's');
158 uint harts[SMP_MAX_CPUS - 1];
159
160 // copy from the detected cpu list to an array of harts, excluding the boot hart
161 size_t hart_index = 0;
162 for (size_t i = 0; i < cpu_count; i++) {
163 if (cpus[i].id != riscv_current_hart()) {
164 harts[hart_index++] = cpus[i].id;
165 }
166
167 // we can start MAX CPUS - 1 secondaries
168 if (hart_index >= SMP_MAX_CPUS - 1) {
169 break;
170 }
171
172 if (cpus[i].isa_string) {
173 if (!isa_string) {
174 // save the first isa string we found
175 isa_string = cpus[i].isa_string;
176 } else {
177 if (strcmp(cpus[i].isa_string, isa_string) != 0) {
178 printf("FDT Warning: isa_strings do not match between cpus, using first found\n");
179 }
180 }
181 }
182
183 }
184
185 // tell the riscv layer how many cores we have to start
186 if (hart_index > 0) {
187 riscv_set_secondary_harts_to_start(harts, hart_index);
188 }
189
190 if (isa_string) {
191 dprintf(INFO, "FDT: isa string '%s'\n", isa_string);
192 riscv_set_isa_string(isa_string);
193 }
194 }
195 }
196 #endif
197
198 return err;
199 }
200 #endif
201
202 #if ARCH_ARM || ARCH_ARM64
fdtwalk_setup_cpus_arm(const void * fdt)203 status_t fdtwalk_setup_cpus_arm(const void *fdt) {
204 #if WITH_SMP
205 struct fdt_walk_cpu_info cpus[SMP_MAX_CPUS];
206 size_t cpu_count = countof(cpus);
207
208 status_t err = fdt_walk_find_cpus(fdt, cpus, &cpu_count);
209 if (err >= NO_ERROR) {
210 if (cpu_count > 0) {
211 dprintf(INFO, "FDT: found %zu cpu%c\n", cpu_count, cpu_count == 1 ? ' ' : 's');
212
213 if (cpu_count > SMP_MAX_CPUS) {
214 cpu_count = MIN(cpu_count, SMP_MAX_CPUS);
215 dprintf(INFO, "FDT: clamping max cpus to %zu\n", cpu_count);
216 }
217
218 LTRACEF("booting %zu cpus\n", cpu_count);
219
220 /* boot the secondary cpus using the Power State Coordintion Interface */
221 for (size_t i = 1; i < cpu_count; i++) {
222 /* note: assumes cpuids are numbered like MPIDR 0:0:0:N */
223 dprintf(INFO, "ARM: starting cpu %#x\n", cpus[i].id);
224 int ret = psci_cpu_on(cpus[i].id, MEMBASE + KERNEL_LOAD_OFFSET);
225 if (ret != 0) {
226 printf("ERROR: psci CPU_ON returns %d\n", ret);
227 }
228 }
229 }
230 }
231 #endif
232
233 return err;
234 }
235 #endif
236
237 #if WITH_DEV_BUS_PCI
fdtwalk_setup_pci(const void * fdt)238 status_t fdtwalk_setup_pci(const void *fdt) {
239 /* detect pci */
240 struct fdt_walk_pcie_info pcie_info[4] = {};
241
242 size_t count = countof(pcie_info);
243 status_t err = fdt_walk_find_pcie_info(fdt, pcie_info, &count);
244 LTRACEF("fdt_walk_find_pcie_info returns %d, count %zu\n", err, count);
245 if (err == NO_ERROR) {
246 for (size_t i = 0; i < count; i++) {
247 LTRACEF("ecam base %#" PRIx64 ", len %#" PRIx64 ", bus_start %hhu, bus_end %hhu\n", pcie_info[i].ecam_base,
248 pcie_info[i].ecam_len, pcie_info[i].bus_start, pcie_info[i].bus_end);
249
250 // currently can only handle the first segment
251 if (i > 0) {
252 printf("skipping pci segment %zu, not supported (yet)\n", i);
253 continue;
254 }
255
256 if (pcie_info[i].ecam_len > 0) {
257 dprintf(INFO, "PCIE: initializing pcie with ecam at %#" PRIx64 " found in FDT\n", pcie_info[i].ecam_base);
258 err = pci_init_ecam(pcie_info[i].ecam_base, pcie_info[i].ecam_len, pcie_info[i].bus_start, pcie_info[i].bus_end);
259 if (err == NO_ERROR) {
260 // add some additional resources to the pci bus manager in case it needs to configure
261 if (pcie_info[i].io_len > 0) {
262 // we can only deal with a mapping of io base 0 to the mmio base
263 DEBUG_ASSERT(pcie_info[i].io_base == 0);
264 pci_bus_mgr_add_resource(PCI_RESOURCE_IO_RANGE, pcie_info[i].io_base, pcie_info[i].io_len);
265
266 // TODO: set the mmio base somehow so pci knows what to do with it
267 }
268 if (pcie_info[i].mmio_len > 0) {
269 pci_bus_mgr_add_resource(PCI_RESOURCE_MMIO_RANGE, pcie_info[i].mmio_base, pcie_info[i].mmio_len);
270 }
271 if (sizeof(void *) >= 8) {
272 if (pcie_info[i].mmio64_len > 0) {
273 pci_bus_mgr_add_resource(PCI_RESOURCE_MMIO64_RANGE, pcie_info[i].mmio64_base, pcie_info[i].mmio64_len);
274 }
275 }
276 }
277 }
278 }
279 }
280
281 return err;
282 }
283 #endif
284
285