// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2016 Google, Inc.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <asm.h>
#include <arch/x86/asm.h>
#include <zircon/boot/image.h>

// This file lays out the final kernel image seen by the boot loader.
// It concatenates:
//     1. the boot loader headers
//     2. the actual kernel image (converted from the kernel ELF file)
//     3. the fixup code to relocate the kernel image
// The headers must tell the boot loader to load the whole combined image,
// and leave enough space in memory after it for the bss.  The fixup code
// in the image overlaps with the start of the kernel's bss, so start.S
// will move it to after the bss.  Hence the headers must tell the boot
// loader to leave enough space for that copy too.
//
// The label arithmetic to define the header fields only works because this
// whole file is all in the same section (.text).  Because it's all just
// one big section and there are no relocs to absolute locations within
// this section, it really doesn't matter what memory layout the linker
// thinks it's doing, but nonetheless image.ld produces an ELF segment
// layout faithful to the physical memory picture (except that it's
// actually position-independent).  The addresses in the ELF headers of the
// final image.elf file are completely ignored because boot loaders don't
// actually use that file.  It only exists to have the contents extracted
// with objcopy -O binary.
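
// For orientation, a rough sketch of the resulting layout (the labels are
// the ones defined below; exact padding and alignment come from image.ld
// and BOOT_HEADER_SIZE, so treat this as illustrative, not normative):
//
//   _zbi_file_header     ZBI container header (zbi_header_t)
//   _zbi_kernel_header   ZBI_TYPE_KERNEL_X64 item header (zbi_header_t)
//   _zbi_kernel_payload  zbi_kernel_t (entry point, reserve size)
//     ...padding up to BOOT_HEADER_SIZE...
//   kernel_image         the kernel image proper
//   fixup_code_size      size word consumed by start.S
//   apply_fixups         position-independent fixup code
//   boot_load_end        end of what the boot loader actually loads
//     ...bss..., then IMAGE_RESERVE_SIZE bytes of scratch for the
//     relocated copy of the fixup code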

// Where the fixup code winds up in memory exactly overlaps the kernel's
// .bss, which the kernel needs to zero before it's ready to run the fixup
// code.  So move_fixups_and_zero_bss (in start.S) copies the fixup code to
// scratch memory starting at IMAGE_MEMORY_END (i.e., right after the
// kernel's .bss).  Hence the reserved region must also cover the fixup
// code's size, which is added here.
//
// The zbi_kernel_t header records this as a number of bytes after the
// image, rather than as an address.
#define boot_bss_end (IMAGE_MEMORY_END + IMAGE_RESERVE_SIZE)
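
// Spelling out the arithmetic (just restating the definitions above, with
// no new assumptions): the reserve recorded in the zbi_kernel_t payload
// below works out to
//   boot_bss_end - boot_load_end
//     = (IMAGE_MEMORY_END + IMAGE_RESERVE_SIZE) - boot_load_end
// i.e. the kernel's bss plus scratch space for the relocated fixup code.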

.globl IMAGE_RESERVE_SIZE
IMAGE_RESERVE_SIZE = apply_fixups_end - apply_fixups

.text

// ZBI file header (zbi_header_t)
ZBI_CONTAINER_HEADER(_zbi_file_header, boot_load_end - _zbi_kernel_header)
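
// ZBI_CONTAINER_HEADER presumably expands to a full zbi_header_t like the
// hand-written kernel item header below, but with ZBI_TYPE_CONTAINER as
// the type and ZBI_CONTAINER_MAGIC in the extra field; the definition in
// the headers included above is authoritative.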

// ZBI kernel header (zbi_header_t)
DATA(_zbi_kernel_header)
    .int ZBI_TYPE_KERNEL_X64                    // type
    .int boot_load_end - _zbi_kernel_payload    // length
    .int 0                                      // extra
    .int ZBI_FLAG_VERSION                       // flags
    .int 0                                      // reserved0
    .int 0                                      // reserved1
    .int ZBI_ITEM_MAGIC                         // magic
    .int ZBI_ITEM_NO_CRC32                      // crc32
END_DATA(_zbi_kernel_header)

// ZBI_TYPE_KERNEL payload (zbi_kernel_t)
DATA(_zbi_kernel_payload)
    .quad PHYS(IMAGE_ELF_ENTRY)          // entry
    .quad boot_bss_end - boot_load_end   // reserve_memory_size
END_DATA(_zbi_kernel_payload)
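
// For reference, the payload above matches zbi_kernel_t as declared in
// <zircon/boot/image.h> (paraphrased here; the header is authoritative):
//
//   typedef struct {
//       uint64_t entry;                // physical entry point address
//       uint64_t reserve_memory_size;  // bytes to reserve after the image
//   } zbi_kernel_t;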

// Pad out to the header size that was allocated in the kernel image layout.
// This ensures that the kernel image is aligned correctly in memory.
.org BOOT_HEADER_SIZE

// Include the kernel image itself, skipping the padding left for the headers.
DATA(kernel_image)
.incbin KERNEL_IMAGE, BOOT_HEADER_SIZE
DATA(kernel_image_end)
END_DATA(kernel_image)

// Immediately after the kernel image comes the fixup code.
// The start.S code sees this address as _end.

// The first word encodes the size of the fixup code so it can be moved around.
DATA(fixup_code_size)
    .int apply_fixups_end - apply_fixups
END_DATA(fixup_code_size)

#define FIXUP_LOCATION(addr) (addr - KERNEL_BASE)(%rdi)
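
// So, for a hypothetical link-time address such as KERNEL_BASE + 0x1000,
// FIXUP_LOCATION(KERNEL_BASE + 0x1000) expands to 0x1000(%rdi): the word's
// offset from the kernel's link-time base, applied to the runtime base
// address passed in %rdi.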

// This code must be purely position-independent and have no relocs.
// This is called with the runtime address of __code_start in %rdi.
FUNCTION(apply_fixups)
    mov %rdi, %rax
    sub $KERNEL_BASE, %rax
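
// At this point %rax holds the load bias: the runtime base in %rdi minus
// the link-time KERNEL_BASE.  Each fixup below adds this bias to one
// absolute 64-bit address stored in the image.
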
// The generated kernel-fixups.inc invokes this macro for each run of fixups.
.macro fixup addr, n, stride
.if \n == 1
    // This instruction is 7 bytes.
    add %rax, FIXUP_LOCATION(\addr)
.elseif \n == 2
    // So this pair is 14 bytes.
    add %rax, FIXUP_LOCATION(\addr)
    add %rax, FIXUP_LOCATION(\addr + \stride)
.else
    // This sequence is 21 bytes: the same size for n == 3 and smaller
    // for n > 3.
    mov $\n, %ecx
    lea FIXUP_LOCATION(\addr), %rdx
0:
    add %rax, (%rdx)
    add $\stride, %rdx
    loop 0b
.endif
.endm
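
// As a worked example (hypothetical operands, not taken from the real
// kernel-fixups.inc): "fixup KERNEL_BASE+0x1000, 2, 8" expands to the
// two-instruction form above, adding the bias in %rax to the 64-bit words
// at 0x1000(%rdi) and 0x1008(%rdi).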

#include "kernel-fixups.inc"

    ret

DATA(apply_fixups_end)
END_FUNCTION(apply_fixups)

.balign 8
DATA(boot_load_end)