1 // Copyright 2018 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "trampoline.h"
6
7 #include <inttypes.h>
8 #include <stddef.h>
9 #include <zircon/compiler.h>
10
// Populate the trampoline area and enter the kernel in 64-bit mode. Paging is
// already enabled. The page tables, the ZBI image (which includes the kernel
// item), and the trampoline area are all placed safely outside the kernel's
// range: PHYS_LOAD_ADDRESS + kernel image size + kernel bss size.
//
// |zbi| is the data ZBI whose pointer is handed to the kernel (in %rbx);
// |kernel_item| points at the kernel item's header inside the ZBI;
// |trampoline| is scratch space outside the kernel's destination range that
// receives the trampoline code, GDT, and far-jump descriptor.
// Never returns: control transfers to the 64-bit trampoline code, which
// copies the kernel into place and jumps to its entry point.
noreturn void boot_zbi(const zbi_header_t* zbi,
                       const zbi_header_t* kernel_item,
                       struct trampoline* trampoline) {
    // The kernel image includes its own container and item headers, so its
    // total footprint at PHYS_LOAD_ADDRESS is the payload plus two headers.
    const size_t kernel_size = kernel_item->length + (2 * sizeof(zbi_header_t));

    // The header inside the kernel item payload gives the entry point as an
    // absolute physical address.  The entry field is wider than 32 bits (the
    // round-trip comparison below detects truncation), but this code can only
    // hand the trampoline a 32-bit address, so panic rather than jump to a
    // silently truncated one.
    const zbi_kernel_t* kernel_header = (void*)(kernel_item + 1);
    uint32_t kernel_entry = kernel_header->entry;
    if (unlikely(kernel_entry != kernel_header->entry)) {
        panic("ZBI kernel entry point %#llx truncated to %#"PRIx32,
              kernel_header->entry, kernel_entry);
    }
    // Sanity-check that the entry point lies inside the kernel's load image.
    if (unlikely(kernel_entry < (uintptr_t)PHYS_LOAD_ADDRESS ||
                 kernel_entry >= (uintptr_t)PHYS_LOAD_ADDRESS + kernel_size)) {
        panic("ZBI kernel entry point %#"PRIx32" outside kernel [%p, %p)",
              kernel_entry, PHYS_LOAD_ADDRESS,
              PHYS_LOAD_ADDRESS + kernel_size);
    }

    // The headers matter for the address arithmetic of where the image gets
    // placed. But the kernel doesn't actually look at those headers, so they
    // don't need to be filled in.  Hence the copy starts at the payload just
    // past the zbi_kernel_t header, and the destination skips the space the
    // (never-written) container and item headers would occupy.
    const uint8_t* copy_src = (const void*)(kernel_header + 1);
    uint8_t* copy_dest =
        PHYS_LOAD_ADDRESS + offsetof(zircon_kernel_t, contents);
    const size_t copy_size = kernel_item->length;

    // The descriptor needed to load the new GDT can be placed on the stack.
    // __PACKED gives the exact in-memory layout the LGDT instruction reads:
    // a 16-bit limit immediately followed by the base address.
    const struct { uint16_t limit; void* base; } __PACKED lgdt = {
        .base = trampoline->gdt,
        .limit = sizeof(trampoline->gdt) - 1,
    };

    // The trampoline area holds the 64-bit trampoline code we'll run, the
    // GDT with the 64-bit code segment we'll run it in, and the long jump
    // descriptor we'll use to get there.  Note .eip takes the *address* of
    // the code array being installed by this same assignment (the array
    // decays to a pointer, so this is safe regardless of evaluation order),
    // and .cs = 1 << 3 is the selector for GDT entry 1 (TI=0, RPL=0).
    *trampoline = (struct trampoline){
        .code = TRAMPOLINE_CODE,
        .gdt = GDT_ENTRIES,
        .ljmp = {
            .eip = trampoline->code,
            .cs = 1 << 3,
        },
    };

    // Tell the compiler all of the trampoline area is read.
    // Otherwise it might conclude that only gdt and ljmp are used.
    __asm__ volatile("" :: "m"(*trampoline));

    __asm__ volatile(
        // Load the GDT stored safely in the trampoline area. We can
        // access the descriptor via the stack segment and stack pointer
        // using the Multiboot-provided flat segments. Hereafter we can
        // use only the registers and the already-running code and data
        // segments, since there are no 32-bit segments in the new GDT.
        "lgdt %[lgdt]\n\t"
        // Jump into the 64-bit trampoline code. The jump descriptor
        // resides in the trampoline area, so the compiler will access it
        // through a non-stack register here.
        "ljmp *%[ljmp]\n\t"
        :: [lgdt]"m"(lgdt), [ljmp]"m"(trampoline->ljmp),
        // The 64-bit trampoline code copies the kernel into place and
        // then jumps to its entry point, as instructed here:
        // NOTE(review): the word count truncates copy_size / 8, so this
        // assumes kernel_item->length is a multiple of 8 (ZBI payload
        // alignment) — any tail bytes short of a full word would be
        // dropped; confirm against the ZBI spec's alignment guarantee.
        "D"(copy_dest), // %rdi: destination pointer
        "S"(copy_src), // %rsi: source pointer
        "c"(copy_size / 8), // %rcx: count of 8-byte words
        "a"(kernel_entry), // %rax: kernel entry point
        "b"(zbi) // %rbx: ZBI data pointer for kernel
        );
    // The ljmp above never returns; this tells the compiler so it need not
    // emit an (unreachable) return path for this noreturn function.
    __builtin_unreachable();
}
88