/*
 * xen/arch/arm/head.S
 *
 * Start-of-day code for an ARMv8.
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2012 Citrix Systems.
 *
 * Based on ARMv7-A head.S by
 * Tim Deegan <tim@xen.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/page.h>
#include <asm/asm_defns.h>
#include <asm/early_printk.h>
#include <efi/efierr.h>
#include <asm/arm64/efibind.h>

/* LPAE descriptor attribute bundles used when building the boot page tables. */
#define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */

#if (defined (CONFIG_EARLY_PRINTK)) && (defined (EARLY_PRINTK_INC))
#include EARLY_PRINTK_INC
#endif

/*
 * Common register usage in this file:
 *  x0  -
 *  x1  -
 *  x2  -
 *  x3  -
 *  x4  -
 *  x5  -
 *  x6  -
 *  x7  -
 *  x8  -
 *  x9  -
 *  x10 -
 *  x11 -
 *  x12 -
 *  x13 -
 *  x14 -
 *  x15 -
 *  x16 -
 *  x17 -
 *  x18 -
 *  x19 - paddr(start)
 *  x20 - phys offset
 *  x21 - DTB address (boot cpu only)
 *  x22 - is_secondary_cpu
 *  x23 - UART address
 *  x24 - cpuid
 *  x25 - identity map in place
 *  x26 - skip_zero_bss
 *  x27 -
 *  x28 -
 *  x29 -
 *  x30 - lr
 */

/* Macro to print a string to the UART, if there is one.
 * Clobbers x0-x3. */
#ifdef CONFIG_EARLY_PRINTK
#define PRINT(_s)       \
        adr   x0, 98f ; \
        bl    puts    ; \
        b     99f     ; \
98:     .asciz _s     ; \
        .align 2      ; \
99:
#else /* CONFIG_EARLY_PRINTK */
#define PRINT(s)
#endif /* !CONFIG_EARLY_PRINTK */

        /*.aarch64*/

        /*
         * Kernel startup entry point.
         * ---------------------------
         *
         * The requirements are:
         *   MMU = off, D-cache = off, I-cache = on or off,
         *   x0 = physical address to the FDT blob.
         *
         * This must be the very first address in the loaded image.
         * It should be linked at XEN_VIRT_START, and loaded at any
         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
         * or the initial pagetable code below will need adjustment.
         */

GLOBAL(start)
        /*
         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
         */
efi_head:
        /*
         * This add instruction has no meaningful effect except that
         * its opcode forms the magic "MZ" signature of a PE/COFF file
         * that is required for UEFI applications.
         */
        add   x13, x18, #0x16
        b     real_start           /* branch to kernel start */
        .quad 0                    /* Image load offset from start of RAM */
        .quad 0                    /* reserved */
        .quad 0                    /* reserved */
        .quad 0                    /* reserved */
        .quad 0                    /* reserved */
        .quad 0                    /* reserved */
        .byte 0x41                 /* Magic number, "ARM\x64" */
        .byte 0x52
        .byte 0x4d
        .byte 0x64
        .long pe_header - efi_head /* Offset to the PE header. */

        /*
         * Add the PE/COFF header to the file.  The address of this header
         * is at offset 0x3c in the file, and is part of Linux "Image"
         * header.  The arm64 Linux Image format is designed to support
         * being both an 'Image' format binary and a PE/COFF binary.
         * The PE/COFF format is defined by Microsoft, and is available
         * from: http://msdn.microsoft.com/en-us/gg463119.aspx
         * Version 8.3 adds support for arm64 and UEFI usage.
         */

        .align  3
pe_header:
        .ascii  "PE"
        .short  0
coff_header:
        .short  0xaa64                         /* AArch64 */
        .short  2                              /* nr_sections */
        .long   0                              /* TimeDateStamp */
        .long   0                              /* PointerToSymbolTable */
        .long   1                              /* NumberOfSymbols */
        .short  section_table - optional_header /* SizeOfOptionalHeader */
        .short  0x206                          /* Characteristics. */
                                               /* IMAGE_FILE_DEBUG_STRIPPED | */
                                               /* IMAGE_FILE_EXECUTABLE_IMAGE | */
                                               /* IMAGE_FILE_LINE_NUMS_STRIPPED */
optional_header:
        .short  0x20b                          /* PE32+ format */
        .byte   0x02                           /* MajorLinkerVersion */
        .byte   0x14                           /* MinorLinkerVersion */
        .long   _end - real_start              /* SizeOfCode */
        .long   0                              /* SizeOfInitializedData */
        .long   0                              /* SizeOfUninitializedData */
        .long   efi_start - efi_head           /* AddressOfEntryPoint */
        .long   real_start - efi_head          /* BaseOfCode */

extra_header_fields:
        .quad   0                              /* ImageBase */
        .long   0x1000                         /* SectionAlignment (4 KByte) */
        .long   0x8                            /* FileAlignment */
        .short  0                              /* MajorOperatingSystemVersion */
        .short  0                              /* MinorOperatingSystemVersion */
        .short  0                              /* MajorImageVersion */
        .short  0                              /* MinorImageVersion */
        .short  0                              /* MajorSubsystemVersion */
        .short  0                              /* MinorSubsystemVersion */
        .long   0                              /* Win32VersionValue */

        .long   _end - efi_head                /* SizeOfImage */

        /* Everything before the kernel image is considered part of the header */
        .long   real_start - efi_head          /* SizeOfHeaders */
        .long   0                              /* CheckSum */
        .short  0xa                            /* Subsystem (EFI application) */
        .short  0                              /* DllCharacteristics */
        .quad   0                              /* SizeOfStackReserve */
        .quad   0                              /* SizeOfStackCommit */
        .quad   0                              /* SizeOfHeapReserve */
        .quad   0                              /* SizeOfHeapCommit */
        .long   0                              /* LoaderFlags */
        .long   0x6                            /* NumberOfRvaAndSizes */

        .quad   0                              /* ExportTable */
        .quad   0                              /* ImportTable */
        .quad   0                              /* ResourceTable */
        .quad   0                              /* ExceptionTable */
        .quad   0                              /* CertificationTable */
        .quad   0                              /* BaseRelocationTable */

        /* Section table */
section_table:

        /*
         * The EFI application loader requires a relocation section
         * because EFI applications must be relocatable.  This is a
         * dummy section as far as we are concerned.
         */
        .ascii  ".reloc"
        .byte   0
        .byte   0                              /* end of 0 padding of section name */
        .long   0
        .long   0
        .long   0                              /* SizeOfRawData */
        .long   0                              /* PointerToRawData */
        .long   0                              /* PointerToRelocations */
        .long   0                              /* PointerToLineNumbers */
        .short  0                              /* NumberOfRelocations */
        .short  0                              /* NumberOfLineNumbers */
        .long   0x42100040                     /* Characteristics (section flags) */


        .ascii  ".text"
        .byte   0
        .byte   0
        .byte   0                              /* end of 0 padding of section name */
        .long   _end - real_start              /* VirtualSize */
        .long   real_start - efi_head          /* VirtualAddress */
        .long   __init_end_efi - real_start    /* SizeOfRawData */
        .long   real_start - efi_head          /* PointerToRawData */

        .long   0                              /* PointerToRelocations (0 for executables) */
        .long   0                              /* PointerToLineNumbers (0 for executables) */
        .short  0                              /* NumberOfRelocations (0 for executables) */
        .short  0                              /* NumberOfLineNumbers (0 for executables) */
        .long   0xe0500020                     /* Characteristics (section flags) */
        .align  5
real_start:
        /* BSS should be zeroed when booting without EFI */
        mov   x26, #0                 /* x26 := skip_zero_bss */

real_start_efi:
        msr   DAIFSet, 0xf            /* Disable all interrupts */

        /* Save the bootloader arguments in less-clobberable registers */
        mov   x21, x0                 /* x21 := DTB, physical address  */

        /* Find out where we are */
        ldr   x0, =start              /* x0 := linked (virtual) address of start */
        adr   x19, start              /* x19 := paddr (start) */
        sub   x20, x19, x0            /* x20 := phys-offset */

        /* Using the DTB in the .dtb section?
 */
#ifdef CONFIG_DTB_FILE
        ldr   x21, =_sdtb
        add   x21, x21, x20           /* x21 := paddr(DTB) */
#endif

        mov   x22, #0                 /* x22 := is_secondary_cpu */

        b     common_start

GLOBAL(init_secondary)
        msr   DAIFSet, 0xf            /* Disable all interrupts */

        /* Find out where we are */
        ldr   x0, =start              /* x0 := linked (virtual) address of start */
        adr   x19, start              /* x19 := paddr (start) */
        sub   x20, x19, x0            /* x20 := phys-offset */

        mov   x22, #1                 /* x22 := is_secondary_cpu */
        /* The boot CPU has already zeroed BSS, so skip it on secondary CPUs. */
        mov   x26, #1                 /* x26 := skip_zero_bss */

common_start:
        mov   x24, #0                 /* x24 := CPU ID. Initially zero until we
                                       * find that multiprocessor extensions are
                                       * present and the system is SMP  */
        mrs   x0, mpidr_el1
        tbnz  x0, _MPIDR_UP, 1f       /* Uniprocessor system? */

        ldr   x13, =(~MPIDR_HWID_MASK)
        bic   x24, x0, x13            /* Mask out flags to get CPU ID */
1:

        /* Non-boot CPUs wait here until __cpu_up is ready for them.
         * They sleep in wfe and re-check smp_up_cpu each time the boot
         * CPU updates it (the store side is expected to issue a sev). */
        cbz   x22, 1f

        ldr   x0, =smp_up_cpu
        add   x0, x0, x20             /* Apply physical offset */
        dsb   sy
2:      ldr   x1, [x0]
        cmp   x1, x24
        beq   1f
        wfe
        b     2b
1:

#ifdef CONFIG_EARLY_PRINTK
        ldr   x23, =EARLY_UART_BASE_ADDRESS /* x23 := UART base address */
        cbnz  x22, 1f
        bl    init_uart               /* Boot CPU sets up the UART too */
1:      PRINT("- CPU ")
        mov   x0, x24
        bl    putn
        PRINT(" booting -\r\n")
#endif

        PRINT("- Current EL ")
        mrs   x4, CurrentEL
        mov   x0, x4
        bl    putn
        PRINT(" -\r\n")

        /* Are we in EL2?  Accept either the SP_EL0 (EL2t) or SP_EL2
         * (EL2h) stack-pointer variant. */
        cmp   x4, #PSR_MODE_EL2t
        ccmp  x4, #PSR_MODE_EL2h, #0x4, ne
        b.eq  el2                     /* Yes */

        /* OK, we're boned.  Xen cannot run outside NS EL2. */
        PRINT("- Xen must be entered in NS EL2 mode -\r\n")
        PRINT("- Please update the bootloader -\r\n")
        b     fail

el2:    PRINT("- Xen starting at EL2 -\r\n")

        /* Zero BSS only when requested (skipped on secondary CPUs and
         * when entered via EFI, see x26). */
        cbnz  x26, skip_bss

        PRINT("- Zero BSS -\r\n")
        ldr   x0, =__bss_start        /* Load start & end of bss */
        ldr   x1, =__bss_end
        add   x0, x0, x20             /* Apply physical offset */
        add   x1, x1, x20

1:      str   xzr, [x0], #8
        cmp   x0, x1
        b.lo  1b

skip_bss:
        PRINT("- Setting up control registers -\r\n")

        /* XXXX call PROCINFO_cpu_init here */

        /* Set up memory attribute type tables */
        ldr   x0, =MAIRVAL
        msr   mair_el2, x0

        /* Set up TCR_EL2:
         * PS -- Based on ID_AA64MMFR0_EL1.PARange
         * Top byte is used
         * PT walks use Inner-Shareable accesses,
         * PT walks are write-back, write-allocate in both cache levels,
         * 48-bit virtual address space goes through this table. */
        ldr   x0, =(TCR_RES1|TCR_SH0_IS|TCR_ORGN0_WBWA|TCR_IRGN0_WBWA|TCR_T0SZ(64-48))
        /* ID_AA64MMFR0_EL1[3:0] (PARange) corresponds to TCR_EL2[18:16] (PS) */
        mrs   x1, ID_AA64MMFR0_EL1
        bfi   x0, x1, #16, #3

        msr   tcr_el2, x0

        /* Set up the SCTLR_EL2:
         * Exceptions in LE ARM,
         * Low-latency IRQs disabled,
         * Write-implies-XN disabled (for now),
         * D-cache disabled (for now),
         * I-cache enabled,
         * Alignment checking disabled,
         * MMU translation disabled (for now). */
        ldr   x0, =(HSCTLR_BASE)
        msr   SCTLR_EL2, x0

        /* Ensure that any exceptions encountered at EL2
         * are handled using the EL2 stack pointer, rather
         * than SP_EL0. */
        msr   spsel, #1

        /* Rebuild the boot pagetable's first-level entries.  The structure
         * is described in mm.c.
         *
         * After the CPU enables paging it will add the fixmap mapping
         * to these page tables, however this may clash with the 1:1
         * mapping.  So each CPU must rebuild the page tables here with
         * the 1:1 in place.
 */

        /* If Xen is loaded at exactly XEN_VIRT_START then we don't
         * need an additional 1:1 mapping, the virtual mapping will
         * suffice.
         */
        cmp   x19, #XEN_VIRT_START
        cset  x25, eq                 /* x25 := identity map in place, or not */

        /* Write Xen's PT's paddr into TTBR0_EL2 */
        ldr   x4, =boot_pgtable
        add   x4, x4, x20             /* x4 := paddr (boot_pagetable) */
        msr   TTBR0_EL2, x4

        /* Setup boot_pgtable: */
        ldr   x1, =boot_first
        add   x1, x1, x20             /* x1 := paddr (boot_first) */

        /* ... map boot_first in boot_pgtable[0] */
        mov   x3, #PT_PT              /* x2 := table map of boot_first */
        orr   x2, x1, x3              /*       + rights for linear PT */
        str   x2, [x4, #0]            /* Map it in slot 0 */

        /* ... map of paddr(start) in boot_pgtable+boot_first_id */
        lsr   x1, x19, #ZEROETH_SHIFT /* Offset of base paddr in boot_pgtable */
        cbz   x1, 1f                  /* It's in slot 0, map in boot_first
                                       * or boot_second later on */

        /* Level zero does not support superpage mappings, so we have
         * to use an extra first level page in which we create a 1GB mapping.
         */
        ldr   x2, =boot_first_id
        add   x2, x2, x20             /* x2 := paddr (boot_first_id) */

        mov   x3, #PT_PT              /* x2 := table map of boot_first_id */
        orr   x2, x2, x3              /*       + rights for linear PT */
        lsl   x1, x1, #3              /* x1 := Slot offset */
        str   x2, [x4, x1]

        ldr   x4, =boot_first_id      /* Next level into boot_first_id */
        add   x4, x4, x20             /* x4 := paddr(boot_first_id) */

        lsr   x1, x19, #FIRST_SHIFT   /* x1 := Offset of base paddr in boot_first_id */
        lsl   x2, x1, #FIRST_SHIFT    /* x2 := Base address for 1GB mapping */
        mov   x3, #PT_MEM             /* x2 := Section map */
        orr   x2, x2, x3
        and   x1, x1, #LPAE_ENTRY_MASK /* x1 := Slot offset */
        lsl   x1, x1, #3
        str   x2, [x4, x1]            /* Mapping of paddr(start) */
        mov   x25, #1                 /* x25 := identity map now in place */

1:      /* Setup boot_first: */
        ldr   x4, =boot_first         /* Next level into boot_first */
        add   x4, x4, x20             /* x4 := paddr(boot_first) */

        /* ... map boot_second in boot_first[0] */
        ldr   x1, =boot_second
        add   x1, x1, x20             /* x1 := paddr(boot_second) */
        mov   x3, #PT_PT              /* x2 := table map of boot_second */
        orr   x2, x1, x3              /*       + rights for linear PT */
        str   x2, [x4, #0]            /* Map it in slot 0 */

        /* ... map of paddr(start) in boot_first */
        cbnz  x25, 1f                 /* x25 is set if already created */
        lsr   x2, x19, #FIRST_SHIFT   /* x2 := Offset of base paddr in boot_first */
        and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
        cbz   x1, 1f                  /* It's in slot 0, map in boot_second */

        lsl   x2, x2, #FIRST_SHIFT    /* Base address for 1GB mapping */
        mov   x3, #PT_MEM             /* x2 := Section map */
        orr   x2, x2, x3
        lsl   x1, x1, #3              /* x1 := Slot offset */
        str   x2, [x4, x1]            /* Create mapping of paddr(start)*/
        mov   x25, #1                 /* x25 := identity map now in place */

1:      /* Setup boot_second: */
        ldr   x4, =boot_second        /* Next level into boot_second */
        add   x4, x4, x20             /* x4 := paddr(boot_second) */

        /* ... map boot_third in boot_second[1] */
        ldr   x1, =boot_third
        add   x1, x1, x20             /* x1 := paddr(boot_third) */
        mov   x3, #PT_PT              /* x2 := table map of boot_third */
        orr   x2, x1, x3              /*       + rights for linear PT */
        str   x2, [x4, #8]            /* Map it in slot 1 */

        /* ... map of paddr(start) in boot_second */
        cbnz  x25, 1f                 /* x25 is set if already created */
        lsr   x2, x19, #SECOND_SHIFT  /* x2 := Offset of base paddr in boot_second */
        and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
        cmp   x1, #1
        b.eq  virtphys_clash          /* It's in slot 1, which we cannot handle */

        lsl   x2, x2, #SECOND_SHIFT   /* Base address for 2MB mapping */
        mov   x3, #PT_MEM             /* x2 := Section map */
        orr   x2, x2, x3
        lsl   x1, x1, #3              /* x1 := Slot offset */
        str   x2, [x4, x1]            /* Create mapping of paddr(start)*/
        mov   x25, #1                 /* x25 := identity map now in place */

1:      /* Setup boot_third: map the full 2MB slot with 4K pages. */
        ldr   x4, =boot_third
        add   x4, x4, x20             /* x4 := paddr (boot_third) */

        lsr   x2, x19, #THIRD_SHIFT   /* Base address for 4K mapping */
        lsl   x2, x2, #THIRD_SHIFT
        mov   x3, #PT_MEM_L3          /* x2 := Section map */
        orr   x2, x2, x3

        /* ... map of vaddr(start) in boot_third */
        mov   x1, xzr
1:      str   x2, [x4, x1]            /* Map vaddr(start) */
        add   x2, x2, #PAGE_SIZE      /* Next page */
        add   x1, x1, #8              /* Next slot */
        cmp   x1, #(LPAE_ENTRIES<<3)  /* 512 entries per page */
        b.lt  1b

        /* Defer fixmap and dtb mapping until after paging enabled, to
         * avoid them clashing with the 1:1 mapping. */

        /* boot pagetable setup complete */

        cbnz  x25, 1f                 /* Did we manage to create an identity mapping ? */
        PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
        b     fail
virtphys_clash:
        /* Identity map clashes with boot_third, which we cannot handle yet */
        PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
        b     fail

1:
        PRINT("- Turning on paging -\r\n")

        ldr   x1, =paging             /* Explicit vaddr, not RIP-relative */
        mrs   x0, SCTLR_EL2
        orr   x0, x0, #SCTLR_M        /* Enable MMU */
        orr   x0, x0, #SCTLR_C        /* Enable D-cache */
        dsb   sy                      /* Flush PTE writes and finish reads */
        msr   SCTLR_EL2, x0           /* now paging is enabled */
        isb                           /* Now, flush the icache */
        br    x1                      /* Get a proper vaddr into PC */
paging:

        /* Now we can install the fixmap and dtb mappings, since we
         * don't need the 1:1 map any more */
        dsb   sy
#if defined(CONFIG_EARLY_PRINTK) /* Fixmap is only used by early printk */
        /* Non-boot CPUs don't need to rebuild the fixmap itself, just
         * the mapping from boot_second to xen_fixmap */
        cbnz  x22, 1f

        /* Add UART to the fixmap table */
        ldr   x1, =xen_fixmap         /* x1 := vaddr (xen_fixmap) */
        lsr   x2, x23, #THIRD_SHIFT
        lsl   x2, x2, #THIRD_SHIFT    /* 4K aligned paddr of UART */
        mov   x3, #PT_DEV_L3
        orr   x2, x2, x3              /* x2 := 4K dev map including UART */
        str   x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
1:

        /* Map fixmap into boot_second */
        ldr   x4, =boot_second        /* x4 := vaddr (boot_second) */
        ldr   x2, =xen_fixmap
        add   x2, x2, x20             /* x2 := paddr (xen_fixmap) */
        mov   x3, #PT_PT
        orr   x2, x2, x3              /* x2 := table map of xen_fixmap */
        ldr   x1, =FIXMAP_ADDR(0)
        lsr   x1, x1, #(SECOND_SHIFT - 3) /* x1 := Slot for FIXMAP(0) */
        str   x2, [x4, x1]            /* Map it in the fixmap's slot */

        /* Use a virtual address to access the UART. */
        ldr   x23, =EARLY_UART_VIRTUAL_ADDRESS
#endif

        /*
         * Flush the TLB in case the 1:1 mapping happens to clash with
         * the virtual addresses used by the fixmap or DTB.
         */
        dsb   sy                      /* Ensure any page table updates made above
                                       * have occurred.
 */

        isb
        tlbi  alle2
        dsb   sy                      /* Ensure completion of TLB flush */
        isb

        PRINT("- Ready -\r\n")

        /* The boot CPU should go straight into C now */
        cbz   x22, launch

        /* Non-boot CPUs need to move on to the proper pagetables, which were
         * setup in init_secondary_pagetables. */

        ldr   x4, =init_ttbr          /* VA of TTBR0_EL2 stashed by CPU 0 */
        ldr   x4, [x4]                /* Actual value */
        dsb   sy
        msr   TTBR0_EL2, x4
        dsb   sy
        isb
        tlbi  alle2
        dsb   sy                      /* Ensure completion of TLB flush */
        isb

launch:
        ldr   x0, =init_data
        add   x0, x0, #INITINFO_stack /* Find the boot-time stack */
        ldr   x0, [x0]
        add   x0, x0, #STACK_SIZE     /* (which grows down from the top). */
        sub   x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */
        mov   sp, x0

        mov   x0, x20                 /* Marshal args: - phys_offset */
        mov   x1, x21                 /*               - FDT */
        mov   x2, x24                 /*               - CPU ID */
        cbnz  x22, 1f
        b     start_xen               /* and disappear into the land of C */
1:
        b     start_secondary         /* (to the appropriate entry point) */

/* Fail-stop: park the CPU in a low-power wfe loop. */
fail:   PRINT("- Boot failed -\r\n")
1:      wfe
        b     1b

GLOBAL(_end_boot)

/* Copy Xen to new location and switch TTBR
 * x0    ttbr
 * x1    source address
 * x2    destination address
 * x3    length
 *
 * Source and destination must be word aligned, length is rounded up
 * to a 16 byte boundary.
 *
 * MUST BE VERY CAREFUL when saving things to RAM over the copy */
ENTRY(relocate_xen)
        /* Copy 16 bytes at a time using:
         *   x9: counter
         *   x10: data
         *   x11: data
         *   x12: source
         *   x13: destination
         */
        mov   x9, x3
        mov   x12, x1
        mov   x13, x2

1:      ldp   x10, x11, [x12], #16
        stp   x10, x11, [x13], #16

        subs  x9, x9, #16
        bgt   1b

        /* Flush destination from dcache using:
         *   x9: counter
         *   x10: step
         *   x11: vaddr
         */
        dsb   sy                      /* So the CPU issues all writes to the range */

        mov   x9, x3
        ldr   x10, =cacheline_bytes   /* x10 := step */
        ldr   x10, [x10]
        mov   x11, x2

1:      dc    cvac, x11

        add   x11, x11, x10
        subs  x9, x9, x10
        bgt   1b

        dsb   sy                      /* Ensure the flushes happen before
                                       * continuing */
        isb                           /* Ensure synchronization with previous
                                       * changes to text */
        tlbi  alle2                   /* Flush hypervisor TLB */
        ic    iallu                   /* Flush I-cache */
        dsb   sy                      /* Ensure completion of TLB flush */
        isb

        msr   TTBR0_EL2, x0

        isb                           /* Ensure synchronization with previous
                                       * changes to text */
        tlbi  alle2                   /* Flush hypervisor TLB */
        ic    iallu                   /* Flush I-cache */
        dsb   sy                      /* Ensure completion of TLB flush */
        isb

        ret

#ifdef CONFIG_EARLY_PRINTK
/* Bring up the UART.
 * x23: Early UART base address
 * Clobbers x0-x1 */
init_uart:
#ifdef EARLY_PRINTK_INIT_UART
        early_uart_init x23, 0
#endif
        adr   x0, 1f
        b     puts                    /* Tail-call: puts returns to our caller */
1:      .asciz "- UART enabled -\r\n"
        .align 4

/* Print early debug messages.
 * x0: Nul-terminated string to print.
 * x23: Early UART base address
 * Clobbers x0-x1 */
puts:
        early_uart_ready x23, 1
        ldrb  w1, [x0], #1            /* Load next char */
        cbz   w1, 1f                  /* Exit on nul */
        early_uart_transmit x23, w1
        b     puts
1:
        ret

/* Print a 32-bit number in hex.  Specific to the PL011 UART.
 * x0: Number to print.
 * x23: Early UART base address
 * Clobbers x0-x3 */
putn:
        adr   x1, hex
        mov   x3, #8                  /* 8 nybbles in a 32-bit number */
1:
        early_uart_ready x23, 2
        and   x2, x0, #0xf0000000     /* Mask off the top nybble */
        lsr   x2, x2, #28
        ldrb  w2, [x1, x2]            /* Convert to a char */
        early_uart_transmit x23, w2
        lsl   x0, x0, #4              /* Roll it through one nybble at a time */
        subs  x3, x3, #1
        b.ne  1b
        ret

hex:    .ascii "0123456789abcdef"
        .align 2

#else /* CONFIG_EARLY_PRINTK */

/* No-op stand-ins so callers link without early printk. */
ENTRY(early_puts)
init_uart:
puts:
putn:   ret

#endif /* !CONFIG_EARLY_PRINTK */

/* This provides a C-API version of __lookup_processor_type
 * TODO: For now, the implementation returns NULL every time
 */
ENTRY(lookup_processor_type)
        mov   x0, #0
        ret

/*
 * Function to transition from EFI loader in C, to Xen entry point.
 * void noreturn efi_xen_start(void *fdt_ptr, uint32_t fdt_size);
 */
ENTRY(efi_xen_start)
        /*
         * Preserve x0 (fdt pointer) across call to __flush_dcache_area,
         * restore for entry into Xen.
         */
        mov   x20, x0

        /* flush dcache covering the FDT updated by EFI boot code */
        bl    __flush_dcache_area

        /*
         * Flush dcache covering current runtime addresses
         * of xen text/data. Then flush all of icache.
         */
        adrp  x1, _start
        add   x1, x1, #:lo12:_start   /* x1 := vaddr(_start) */
        mov   x0, x1
        adrp  x2, _end
        add   x2, x2, #:lo12:_end     /* x2 := vaddr(_end) */
        sub   x1, x2, x1              /* x1 := length to flush */

        bl    __flush_dcache_area
        ic    ialluis
        tlbi  alle2

        /*
         * Turn off cache and MMU as Xen expects. EFI enables them, but also
         * mandates a 1:1 (unity) VA->PA mapping, so we can turn off the
         * MMU while executing EFI code before entering Xen.
         * The EFI loader calls this to start Xen.
         */

        /* Turn off Dcache and MMU */
        mrs   x0, sctlr_el2
        bic   x0, x0, #1 << 0         /* clear SCTLR.M */
        bic   x0, x0, #1 << 2         /* clear SCTLR.C */
        msr   sctlr_el2, x0
        isb

        /* Jump to Xen entry point */
        mov   x0, x20
        mov   x1, xzr
        mov   x2, xzr
        mov   x3, xzr
        /*
         * The EFI stub and Xen may share some information living in
         * BSS. Don't zero BSS to avoid losing it.
         *
         * Note that the EFI firmware has already zeroed BSS for us
         * before jumping into the stub.
         */
        mov   x26, #1                 /* x26 := skip_zero_bss */

        b     real_start_efi
ENDPROC(efi_xen_start)

/*
 * Local variables:
 * mode: ASM
 * indent-tabs-mode: nil
 * End:
 */