1 /*
2 * xen/arch/arm/setup.c
3 *
4 * Early bringup code for an ARMv7-A with virt extensions.
5 *
6 * Tim Deegan <tim@xen.org>
7 * Copyright (c) 2011 Citrix Systems.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #include <xen/compile.h>
21 #include <xen/device_tree.h>
22 #include <xen/domain_page.h>
23 #include <xen/types.h>
24 #include <xen/string.h>
25 #include <xen/serial.h>
26 #include <xen/sched.h>
27 #include <xen/console.h>
28 #include <xen/err.h>
29 #include <xen/init.h>
30 #include <xen/irq.h>
31 #include <xen/mm.h>
32 #include <xen/softirq.h>
33 #include <xen/keyhandler.h>
34 #include <xen/cpu.h>
35 #include <xen/pfn.h>
36 #include <xen/virtual_region.h>
37 #include <xen/vmap.h>
38 #include <xen/libfdt/libfdt.h>
39 #include <xen/acpi.h>
40 #include <asm/alternative.h>
41 #include <asm/page.h>
42 #include <asm/current.h>
43 #include <asm/setup.h>
44 #include <asm/gic.h>
45 #include <asm/cpuerrata.h>
46 #include <asm/cpufeature.h>
47 #include <asm/platform.h>
48 #include <asm/procinfo.h>
49 #include <asm/setup.h>
50 #include <xsm/xsm.h>
51 #include <asm/acpi.h>
52
/* Boot-time information (memory banks, boot modules) parsed from the DTB. */
struct bootinfo __initdata bootinfo;

/* CPU identification registers of the boot CPU, filled by processor_id(). */
struct cpuinfo_arm __read_mostly boot_cpu_data;

#ifdef CONFIG_ACPI
/*
 * True when ACPI support is compiled in but not used at runtime.
 * NOTE(review): presumably set elsewhere (ACPI table probing / command
 * line) — not assigned anywhere in this file.
 */
bool __read_mostly acpi_disabled;
#endif

#ifdef CONFIG_ARM_32
/* Optional "xenheap_megabytes=" command-line override for the xenheap size. */
static unsigned long opt_xenheap_megabytes __initdata;
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif
65
/*
 * Final stage of boot on the boot CPU: free the .init sections and park
 * in the idle loop.  Entered via switch_stack_and_jump() at the end of
 * start_xen(), so it never returns (hence __used: no direct caller).
 */
static __used void init_done(void)
{
    free_init_memory();
    startup_cpu_idle_loop();
}
71
/*
 * Initialise the scheduler and install idle_vcpu[0] as the current vcpu
 * on the boot CPU (replacing the debug sentinel set in start_xen()).
 */
static void __init init_idle_domain(void)
{
    scheduler_init();
    set_current(idle_vcpu[0]);
    /* TODO: setup_idle_pagetable(); */
}
78
/*
 * Human-readable names for the MIDR "implementer" field, which is an
 * ASCII character code; indexed directly by that character.
 */
static const char * __initdata processor_implementers[] = {
    ['A'] = "ARM Limited",
    ['B'] = "Broadcom Corporation",
    ['C'] = "Cavium Inc.",
    ['D'] = "Digital Equipment Corp",
    ['M'] = "Motorola, Freescale Semiconductor Inc.",
    ['P'] = "Applied Micro",
    ['Q'] = "Qualcomm Inc.",
    ['V'] = "Marvell Semiconductor Inc.",
    ['i'] = "Intel Corporation",
};
90
/*
 * Identify the boot CPU and print a summary of its ID registers.
 * Side effects: fills boot_cpu_data and current_cpu_data, then runs
 * processor_setup() and checks for known CPU errata on this CPU.
 */
static void __init processor_id(void)
{
    const char *implementer = "Unknown";
    struct cpuinfo_arm *c = &boot_cpu_data;

    identify_cpu(c);
    current_cpu_data = *c;

    /* MIDR implementer is an ASCII code used to index the name table. */
    if ( c->midr.implementer < ARRAY_SIZE(processor_implementers) &&
         processor_implementers[c->midr.implementer] )
        implementer = processor_implementers[c->midr.implementer];

    /* 0xf in the architecture field means "use the CPUID scheme". */
    if ( c->midr.architecture != 0xf )
        printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n",
               c->midr.architecture);

    printk("Processor: %08"PRIx32": \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n",
           c->midr.bits, implementer,
           c->midr.variant, c->midr.part_number, c->midr.revision);

#if defined(CONFIG_ARM_64)
    printk("64-bit Execution:\n");
    printk(" Processor Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.pfr64.bits[0], boot_cpu_data.pfr64.bits[1]);
    printk(" Exception Levels: EL3:%s EL2:%s EL1:%s EL0:%s\n",
           cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No",
           cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No",
           cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No",
           cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No");
    printk(" Extensions:%s%s%s\n",
           cpu_has_fp ? " FloatingPoint" : "",
           cpu_has_simd ? " AdvancedSIMD" : "",
           cpu_has_gicv3 ? " GICv3-SysReg" : "");

    printk(" Debug Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.dbg64.bits[0], boot_cpu_data.dbg64.bits[1]);
    printk(" Auxiliary Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.aux64.bits[0], boot_cpu_data.aux64.bits[1]);
    printk(" Memory Model Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.mm64.bits[0], boot_cpu_data.mm64.bits[1]);
    printk(" ISA Features:  %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.isa64.bits[0], boot_cpu_data.isa64.bits[1]);
#endif

    /*
     * On AArch64 these refer to the capabilities when running in
     * AArch32 mode.
     */
    if ( cpu_has_aarch32 )
    {
        printk("32-bit Execution:\n");
        printk(" Processor Features: %08"PRIx32":%08"PRIx32"\n",
               boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]);
        printk(" Instruction Sets:%s%s%s%s%s%s\n",
               cpu_has_aarch32 ? " AArch32" : "",
               cpu_has_arm ? " A32" : "",
               cpu_has_thumb ? " Thumb" : "",
               cpu_has_thumb2 ? " Thumb-2" : "",
               cpu_has_thumbee ? " ThumbEE" : "",
               cpu_has_jazelle ? " Jazelle" : "");
        printk(" Extensions:%s%s\n",
               cpu_has_gentimer ? " GenericTimer" : "",
               cpu_has_security ? " Security" : "");

        printk(" Debug Features: %08"PRIx32"\n",
               boot_cpu_data.dbg32.bits[0]);
        printk(" Auxiliary Features: %08"PRIx32"\n",
               boot_cpu_data.aux32.bits[0]);
        printk(" Memory Model Features: "
               "%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n",
               boot_cpu_data.mm32.bits[0], boot_cpu_data.mm32.bits[1],
               boot_cpu_data.mm32.bits[2], boot_cpu_data.mm32.bits[3]);
        printk(" ISA Features: %08x %08x %08x %08x %08x %08x\n",
               boot_cpu_data.isa32.bits[0], boot_cpu_data.isa32.bits[1],
               boot_cpu_data.isa32.bits[2], boot_cpu_data.isa32.bits[3],
               boot_cpu_data.isa32.bits[4], boot_cpu_data.isa32.bits[5]);
    }
    else
    {
        printk("32-bit Execution: Unsupported\n");
    }

    processor_setup();

    check_local_cpu_errata();
}
177
/*
 * Invoke cb() on every sub-range of [s, e) that does not intersect any
 * FDT reserved-memory region.  On overlap with a reservation, recurse
 * separately on the pieces above and below it; 'first' is the index of
 * the first reservation still to consider, so each reservation is
 * handled at most once per branch of the recursion.
 */
void dt_unreserved_regions(paddr_t s, paddr_t e,
                           void (*cb)(paddr_t, paddr_t), int first)
{
    int i, nr = fdt_num_mem_rsv(device_tree_flattened);

    for ( i = first; i < nr ; i++ )
    {
        paddr_t r_s, r_e;

        if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e ) < 0 )
            /* If we can't read it, pretend it doesn't exist... */
            continue;

        r_e += r_s; /* fdt_get_mem_rsv returns length */

        /* Reservation [r_s, r_e) overlaps [s, e): split around it. */
        if ( s < r_e && r_s < e )
        {
            dt_unreserved_regions(r_e, e, cb, i+1);
            dt_unreserved_regions(s, r_s, cb, i+1);
            return;
        }
    }

    /* No (remaining) reservation overlaps: the whole range is free. */
    cb(s, e);
}
203
/*
 * Record a boot module of the given kind in bootinfo.modules.
 *
 * Returns a pointer to the stored entry, or NULL (with a warning) when
 * the module table is already full.  The command line, if supplied, is
 * copied into the entry.
 */
struct bootmodule *add_boot_module(bootmodule_kind kind,
                                   paddr_t start, paddr_t size,
                                   const char *cmdline)
{
    struct bootmodules *mods = &bootinfo.modules;
    struct bootmodule *entry;

    if ( mods->nr_mods == MAX_MODULES )
    {
        printk("Ignoring %s boot module at %"PRIpaddr"-%"PRIpaddr" (too many)\n",
               boot_module_kind_as_string(kind), start, start + size);
        return NULL;
    }

    entry = &mods->module[mods->nr_mods];
    mods->nr_mods++;

    entry->kind = kind;
    entry->start = start;
    entry->size = size;

    if ( cmdline == NULL )
        entry->cmdline[0] = 0;
    else
        safe_strcpy(entry->cmdline, cmdline);

    return entry;
}
229
boot_module_find_by_kind(bootmodule_kind kind)230 struct bootmodule * __init boot_module_find_by_kind(bootmodule_kind kind)
231 {
232 struct bootmodules *mods = &bootinfo.modules;
233 struct bootmodule *mod;
234 int i;
235 for (i = 0 ; i < mods->nr_mods ; i++ )
236 {
237 mod = &mods->module[i];
238 if ( mod->kind == kind )
239 return mod;
240 }
241 return NULL;
242 }
243
boot_module_kind_as_string(bootmodule_kind kind)244 const char * __init boot_module_kind_as_string(bootmodule_kind kind)
245 {
246 switch ( kind )
247 {
248 case BOOTMOD_XEN: return "Xen";
249 case BOOTMOD_FDT: return "Device Tree";
250 case BOOTMOD_KERNEL: return "Kernel";
251 case BOOTMOD_RAMDISK: return "Ramdisk";
252 case BOOTMOD_XSM: return "XSM";
253 case BOOTMOD_UNKNOWN: return "Unknown";
254 default: BUG();
255 }
256 }
257
/*
 * Free the RAM occupied by all boot modules (except Xen itself) back to
 * the domheap, skipping any FDT reserved regions inside them, then drop
 * the early boot mappings.  After this the module list is empty.
 */
void __init discard_initial_modules(void)
{
    struct bootmodules *mi = &bootinfo.modules;
    int i;

    for ( i = 0; i < mi->nr_mods; i++ )
    {
        paddr_t s = mi->module[i].start;
        paddr_t e = s + PAGE_ALIGN(mi->module[i].size);

        if ( mi->module[i].kind == BOOTMOD_XEN )
            continue;

        /*
         * Only free modules fully backed by valid RAM.
         * NOTE(review): 'e' is one byte past the (page-aligned) end, so
         * mfn_valid(e) checks the page after the module — confirm this
         * is intentional rather than wanting e - 1 here.
         */
        if ( !mfn_valid(_mfn(paddr_to_pfn(s))) ||
             !mfn_valid(_mfn(paddr_to_pfn(e))))
            continue;

        dt_unreserved_regions(s, e, init_domheap_pages, 0);
    }

    mi->nr_mods = 0;

    remove_early_mappings();
}
282
/*
 * Returns the end address of the highest region in the range s..e
 * with required size and alignment that does not conflict with the
 * modules from first_mod to nr_modules, nor with any FDT reserved
 * region.  Returns 0 if no such region exists.
 *
 * For non-recursive callers first_mod should normally be 0 (all
 * modules and Xen itself) or 1 (all modules but not Xen).
 */
static paddr_t __init consider_modules(paddr_t s, paddr_t e,
                                       uint32_t size, paddr_t align,
                                       int first_mod)
{
    const struct bootmodules *mi = &bootinfo.modules;
    int i;
    int nr_rsvd;

    /* Round s up and e down to the requested alignment. */
    s = (s+align-1) & ~(align-1);
    e = e & ~(align-1);

    if ( s > e || e - s < size )
        return 0;

    /* First check the boot modules */
    for ( i = first_mod; i < mi->nr_mods; i++ )
    {
        paddr_t mod_s = mi->module[i].start;
        paddr_t mod_e = mod_s + mi->module[i].size;

        if ( s < mod_e && mod_s < e )
        {
            /*
             * Overlap: prefer the sub-range above the module, falling
             * back to the one below it.  Later modules/reservations are
             * handled by the recursion (i+1).
             */
            mod_e = consider_modules(mod_e, e, size, align, i+1);
            if ( mod_e )
                return mod_e;

            return consider_modules(s, mod_s, size, align, i+1);
        }
    }

    /* Now check any fdt reserved areas. */

    nr_rsvd = fdt_num_mem_rsv(device_tree_flattened);

    /* Reservations are indexed after the modules: i - nr_mods. */
    for ( ; i < mi->nr_mods + nr_rsvd; i++ )
    {
        paddr_t mod_s, mod_e;

        if ( fdt_get_mem_rsv(device_tree_flattened,
                             i - mi->nr_mods,
                             &mod_s, &mod_e ) < 0 )
            /* If we can't read it, pretend it doesn't exist... */
            continue;

        /* fdt_get_mem_rsv returns length */
        mod_e += mod_s;

        if ( s < mod_e && mod_s < e )
        {
            mod_e = consider_modules(mod_e, e, size, align, i+1);
            if ( mod_e )
                return mod_e;

            return consider_modules(s, mod_s, size, align, i+1);
        }
    }

    /* No conflicts: the highest suitable region ends at e. */
    return e;
}
349
/*
 * Return the end of the non-module region starting at s.  In other
 * words, return the start of the next module at or after s.
 *
 * On input *end is the end of the region which should be considered
 * and it is updated to reflect the end of the module, clipped to the
 * end of the region if it would run over.
 *
 * Returns ~0 if no module starts within [s, *end].
 */
static paddr_t __init next_module(paddr_t s, paddr_t *end)
{
    struct bootmodules *mi = &bootinfo.modules;
    paddr_t lowest = ~(paddr_t)0;
    int i;

    for ( i = 0; i < mi->nr_mods; i++ )
    {
        paddr_t mod_s = mi->module[i].start;
        paddr_t mod_e = mod_s + mi->module[i].size;

        /* Ignore empty modules. */
        if ( !mi->module[i].size )
            continue;

        /* Only consider modules starting in [s, *end] below 'lowest'. */
        if ( mod_s < s )
            continue;
        if ( mod_s > lowest )
            continue;
        if ( mod_s > *end )
            continue;
        lowest = mod_s;
        *end = min(*end, mod_e);
    }
    return lowest;
}
383
384
/**
 * get_xen_paddr - get physical address to relocate Xen to
 *
 * Xen is relocated to as near to the top of RAM as possible and
 * aligned to a XEN_PADDR_ALIGN boundary.  Panics when no RAM bank can
 * fit the (alignment-rounded) Xen image without overlapping a boot
 * module or FDT reservation.
 */
static paddr_t __init get_xen_paddr(void)
{
    struct meminfo *mi = &bootinfo.mem;
    paddr_t min_size;
    paddr_t paddr = 0;
    int i;

    /* Image size rounded up to the relocation alignment. */
    min_size = (_end - _start + (XEN_PADDR_ALIGN-1)) & ~(XEN_PADDR_ALIGN-1);

    /* Find the highest bank with enough space. */
    for ( i = 0; i < mi->nr_banks; i++ )
    {
        const struct membank *bank = &mi->bank[i];
        paddr_t s, e;

        if ( bank->size >= min_size )
        {
            e = consider_modules(bank->start, bank->start + bank->size,
                                 min_size, XEN_PADDR_ALIGN, 0);
            if ( !e )
                continue;

#ifdef CONFIG_ARM_32
            /* Xen must be under 4GB */
            if ( e > 0x100000000ULL )
                e = 0x100000000ULL;
            if ( e < bank->start )
                continue;
#endif

            s = e - min_size;

            /* Keep the highest candidate seen so far. */
            if ( s > paddr )
                paddr = s;
        }
    }

    if ( !paddr )
        panic("Not enough memory to relocate Xen");

    printk("Placing Xen at 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
           paddr, paddr + min_size);

    return paddr;
}
436
/*
 * Initialise the PFN<->PDX compression based on the boot-time RAM
 * banks: compute a mask of address bits that are meaningful across all
 * banks, configure the PDX hole from it, and register each bank's PFN
 * range.
 */
static void init_pdx(void)
{
    paddr_t bank_start, bank_size, bank_end;

    /* Seed the mask from the base of the first bank. */
    u64 mask = pdx_init_mask(bootinfo.mem.bank[0].start);
    int bank;

    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        bank_start = bootinfo.mem.bank[bank].start;
        bank_size = bootinfo.mem.bank[bank].size;

        mask |= bank_start | pdx_region_mask(bank_start, bank_size);
    }

    /* If any bank needs bits outside the mask, disable compression. */
    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        bank_start = bootinfo.mem.bank[bank].start;
        bank_size = bootinfo.mem.bank[bank].size;

        if (~mask & pdx_region_mask(bank_start, bank_size))
            mask = 0;
    }

    pfn_pdx_hole_setup(mask >> PAGE_SHIFT);

    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        bank_start = bootinfo.mem.bank[bank].start;
        bank_size = bootinfo.mem.bank[bank].size;
        bank_end = bank_start + bank_size;

        set_pdx_range(paddr_to_pfn(bank_start),
                      paddr_to_pfn(bank_end));
    }
}
473
474 #ifdef CONFIG_ARM_32
setup_mm(unsigned long dtb_paddr,size_t dtb_size)475 static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
476 {
477 paddr_t ram_start, ram_end, ram_size;
478 paddr_t s, e;
479 unsigned long ram_pages;
480 unsigned long heap_pages, xenheap_pages, domheap_pages;
481 unsigned long dtb_pages;
482 unsigned long boot_mfn_start, boot_mfn_end;
483 int i;
484 void *fdt;
485
486 if ( !bootinfo.mem.nr_banks )
487 panic("No memory bank");
488
489 init_pdx();
490
491 ram_start = bootinfo.mem.bank[0].start;
492 ram_size = bootinfo.mem.bank[0].size;
493 ram_end = ram_start + ram_size;
494
495 for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
496 {
497 paddr_t bank_start = bootinfo.mem.bank[i].start;
498 paddr_t bank_size = bootinfo.mem.bank[i].size;
499 paddr_t bank_end = bank_start + bank_size;
500
501 ram_size = ram_size + bank_size;
502 ram_start = min(ram_start,bank_start);
503 ram_end = max(ram_end,bank_end);
504 }
505
506 total_pages = ram_pages = ram_size >> PAGE_SHIFT;
507
508 /*
509 * If the user has not requested otherwise via the command line
510 * then locate the xenheap using these constraints:
511 *
512 * - must be 32 MiB aligned
513 * - must not include Xen itself or the boot modules
514 * - must be at most 1GB or 1/32 the total RAM in the system if less
515 * - must be at least 32M
516 *
517 * We try to allocate the largest xenheap possible within these
518 * constraints.
519 */
520 heap_pages = ram_pages;
521 if ( opt_xenheap_megabytes )
522 xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT);
523 else
524 {
525 xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL;
526 xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT));
527 xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
528 }
529
530 do
531 {
532 e = consider_modules(ram_start, ram_end,
533 pfn_to_paddr(xenheap_pages),
534 32<<20, 0);
535 if ( e )
536 break;
537
538 xenheap_pages >>= 1;
539 } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );
540
541 if ( ! e )
542 panic("Not not enough space for xenheap");
543
544 domheap_pages = heap_pages - xenheap_pages;
545
546 printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n",
547 e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages,
548 opt_xenheap_megabytes ? ", from command-line" : "");
549 printk("Dom heap: %lu pages\n", domheap_pages);
550
551 setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);
552
553 /*
554 * Need a single mapped page for populating bootmem_region_list
555 * and enough mapped pages for copying the DTB.
556 */
557 dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
558 boot_mfn_start = mfn_x(xenheap_mfn_end) - dtb_pages - 1;
559 boot_mfn_end = mfn_x(xenheap_mfn_end);
560
561 init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));
562
563 /* Copy the DTB. */
564 fdt = mfn_to_virt(mfn_x(alloc_boot_pages(dtb_pages, 1)));
565 copy_from_paddr(fdt, dtb_paddr, dtb_size);
566 device_tree_flattened = fdt;
567
568 /* Add non-xenheap memory */
569 for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
570 {
571 paddr_t bank_start = bootinfo.mem.bank[i].start;
572 paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;
573
574 s = bank_start;
575 while ( s < bank_end )
576 {
577 paddr_t n = bank_end;
578
579 e = next_module(s, &n);
580
581 if ( e == ~(paddr_t)0 )
582 {
583 e = n = ram_end;
584 }
585
586 /*
587 * Module in a RAM bank other than the one which we are
588 * not dealing with here.
589 */
590 if ( e > bank_end )
591 e = bank_end;
592
593 /* Avoid the xenheap */
594 if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages))
595 && mfn_to_maddr(xenheap_mfn_start) < e )
596 {
597 e = mfn_to_maddr(xenheap_mfn_start);
598 n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages));
599 }
600
601 dt_unreserved_regions(s, e, init_boot_pages, 0);
602
603 s = n;
604 }
605 }
606
607 /* Frame table covers all of RAM region, including holes */
608 setup_frametable_mappings(ram_start, ram_end);
609 max_page = PFN_DOWN(ram_end);
610
611 /* Add xenheap memory that was not already added to the boot
612 allocator. */
613 init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
614 pfn_to_paddr(boot_mfn_start));
615 }
#else /* CONFIG_ARM_64 */
/*
 * Boot-time memory setup (64-bit variant): map all RAM banks into the
 * xenheap (on arm64 the whole of RAM is xenheap), feed non-module,
 * non-reserved RAM to the boot allocator, copy the DTB and build the
 * frametable.
 *
 * @dtb_paddr: physical address of the flattened device tree.
 * @dtb_size:  size of the DTB in bytes.
 */
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;
    unsigned long dtb_pages;
    void *fdt;

    init_pdx();

    total_pages = 0;
    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[bank].start;
        paddr_t bank_size = bootinfo.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        ram_size = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end = max(ram_end,bank_end);

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

        /* Walk the bank, skipping modules and FDT reservations. */
        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = maddr_to_mfn(ram_start);
    xenheap_mfn_end = maddr_to_mfn(ram_end);

    /*
     * Need enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;

    /* Copy the DTB. */
    fdt = mfn_to_virt(mfn_x(alloc_boot_pages(dtb_pages, 1)));
    copy_from_paddr(fdt, dtb_paddr, dtb_size);
    device_tree_flattened = fdt;

    /* Frame table covers all of RAM, including holes. */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);
}
#endif
682
/* Cache line size in bytes of the boot CPU's level-0 data cache. */
size_t __read_mostly cacheline_bytes;

/* Very early check of the CPU cache properties */
void __init setup_cache(void)
{
    uint32_t ccsid;

    /* Read the cache size ID register for the level-0 data cache */
    WRITE_SYSREG32(0, CSSELR_EL1);
    ccsid = READ_SYSREG32(CCSIDR_EL1);

    /* Low 3 bits are log2(cacheline size in words) - 2. */
    cacheline_bytes = 1U << (4 + (ccsid & 0x7));
}
697
698 /* C entry point for boot CPU */
start_xen(unsigned long boot_phys_offset,unsigned long fdt_paddr,unsigned long cpuid)699 void __init start_xen(unsigned long boot_phys_offset,
700 unsigned long fdt_paddr,
701 unsigned long cpuid)
702 {
703 size_t fdt_size;
704 int cpus, i;
705 paddr_t xen_paddr;
706 const char *cmdline;
707 struct bootmodule *xen_bootmodule;
708 struct domain *dom0;
709 struct xen_arch_domainconfig config;
710
711 setup_cache();
712
713 percpu_init_areas();
714 set_processor_id(0); /* needed early, for smp_processor_id() */
715
716 set_current((struct vcpu *)0xfffff000); /* debug sanity */
717 idle_vcpu[0] = current;
718
719 setup_virtual_regions(NULL, NULL);
720 /* Initialize traps early allow us to get backtrace when an error occurred */
721 init_traps();
722
723 smp_clear_cpu_maps();
724
725 device_tree_flattened = early_fdt_map(fdt_paddr);
726 if ( !device_tree_flattened )
727 panic("Invalid device tree blob at physical address %#lx.\n"
728 "The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n"
729 "Please check your bootloader.",
730 fdt_paddr);
731
732 fdt_size = boot_fdt_info(device_tree_flattened, fdt_paddr);
733
734 cmdline = boot_fdt_cmdline(device_tree_flattened);
735 printk("Command line: %s\n", cmdline);
736 cmdline_parse(cmdline);
737
738 /* Register Xen's load address as a boot module. */
739 xen_bootmodule = add_boot_module(BOOTMOD_XEN,
740 (paddr_t)(uintptr_t)(_start + boot_phys_offset),
741 (paddr_t)(uintptr_t)(_end - _start + 1), NULL);
742 BUG_ON(!xen_bootmodule);
743
744 xen_paddr = get_xen_paddr();
745 setup_pagetables(boot_phys_offset, xen_paddr);
746
747 /* Update Xen's address now that we have relocated. */
748 printk("Update BOOTMOD_XEN from %"PRIpaddr"-%"PRIpaddr" => %"PRIpaddr"-%"PRIpaddr"\n",
749 xen_bootmodule->start, xen_bootmodule->start + xen_bootmodule->size,
750 xen_paddr, xen_paddr + xen_bootmodule->size);
751 xen_bootmodule->start = xen_paddr;
752
753 setup_mm(fdt_paddr, fdt_size);
754
755 /* Parse the ACPI tables for possible boot-time configuration */
756 acpi_boot_table_init();
757
758 end_boot_allocator();
759
760 /*
761 * The memory subsystem has been initialized, we can now switch from
762 * early_boot -> boot.
763 */
764 system_state = SYS_STATE_boot;
765
766 vm_init();
767
768 if ( acpi_disabled )
769 {
770 printk("Booting using Device Tree\n");
771 dt_unflatten_host_device_tree();
772 }
773 else
774 printk("Booting using ACPI\n");
775
776 init_IRQ();
777
778 platform_init();
779
780 preinit_xen_time();
781
782 gic_preinit();
783
784 arm_uart_init();
785 console_init_preirq();
786 console_init_ring();
787
788 processor_id();
789
790 smp_init_cpus();
791 cpus = smp_get_max_cpus();
792 printk(XENLOG_INFO "SMP: Allowing %u CPUs\n", cpus);
793 nr_cpu_ids = cpus;
794
795 init_xen_time();
796
797 gic_init();
798
799 softirq_init();
800
801 tasklet_subsys_init();
802
803
804 xsm_dt_init();
805
806 init_maintenance_interrupt();
807 init_timer_interrupt();
808
809 timer_init();
810
811 init_idle_domain();
812
813 rcu_init();
814
815 arch_init_memory();
816
817 local_irq_enable();
818 local_abort_enable();
819
820 smp_prepare_cpus(cpus);
821
822 initialize_keytable();
823
824 console_init_postirq();
825
826 do_presmp_initcalls();
827
828 for_each_present_cpu ( i )
829 {
830 if ( (num_online_cpus() < cpus) && !cpu_online(i) )
831 {
832 int ret = cpu_up(i);
833 if ( ret != 0 )
834 printk("Failed to bring up CPU %u (error %d)\n", i, ret);
835 }
836 }
837
838 printk("Brought up %ld CPUs\n", (long)num_online_cpus());
839 /* TODO: smp_cpus_done(); */
840
841 setup_virt_paging();
842
843 iommu_setup();
844
845 do_initcalls();
846
847 /*
848 * It needs to be called after do_initcalls to be able to use
849 * stop_machine (tasklets initialized via an initcall).
850 */
851 apply_alternatives_all();
852
853 /* Create initial domain 0. */
854 /* The vGIC for DOM0 is exactly emulating the hardware GIC */
855 config.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
856 config.nr_spis = gic_number_lines() - 32;
857
858 dom0 = domain_create(0, 0, 0, &config);
859 if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
860 panic("Error creating domain 0");
861
862 dom0->is_privileged = 1;
863 dom0->target = NULL;
864
865 if ( construct_dom0(dom0) != 0)
866 panic("Could not set up DOM0 guest OS");
867
868 heap_init_late();
869
870 init_constructors();
871
872 console_endboot();
873
874 /* Hide UART from DOM0 if we're using it */
875 serial_endboot();
876
877 system_state = SYS_STATE_active;
878
879 /* Must be done past setting system_state. */
880 unregister_init_virtual_region();
881
882 domain_unpause_by_systemcontroller(dom0);
883
884 /* Switch on to the dynamically allocated stack for the idle vcpu
885 * since the static one we're running on is about to be freed. */
886 memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(),
887 sizeof(struct cpu_info));
888 switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done);
889 }
890
/*
 * Fill *info with the space-separated list of interface capability
 * strings supported by this build ("xen-3.0-aarch64", "xen-3.0-armv7l").
 */
void arch_get_xen_caps(xen_capabilities_info_t *info)
{
    /* Interface name is always xen-3.0-* for Xen-3.x. */
    const int major = 3;
    const int minor = 0;
    char buf[32];

    /* Start from an empty capability list. */
    (*info)[0] = '\0';

#ifdef CONFIG_ARM_64
    /* Native 64-bit guests. */
    snprintf(buf, sizeof(buf), "xen-%d.%d-aarch64 ", major, minor);
    safe_strcat(*info, buf);
#endif

    /* 32-bit guests, when the CPU supports AArch32. */
    if ( cpu_has_aarch32 )
    {
        snprintf(buf, sizeof(buf), "xen-%d.%d-armv7l ", major, minor);
        safe_strcat(*info, buf);
    }
}
909
910 /*
911 * Local variables:
912 * mode: C
913 * c-file-style: "BSD"
914 * c-basic-offset: 4
915 * indent-tabs-mode: nil
916 * End:
917 */
918