// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <elfload/elfload.h>

#include <endian.h>
#include <limits.h>
#include <string.h>
#include <zircon/syscalls.h>

#if BYTE_ORDER == LITTLE_ENDIAN
# define MY_ELFDATA ELFDATA2LSB
#elif BYTE_ORDER == BIG_ENDIAN
# define MY_ELFDATA ELFDATA2MSB
#else
# error what byte order?
#endif

#if defined(__arm__)
# define MY_MACHINE EM_ARM
#elif defined(__aarch64__)
# define MY_MACHINE EM_AARCH64
#elif defined(__x86_64__)
# define MY_MACHINE EM_X86_64
#elif defined(__i386__)
# define MY_MACHINE EM_386
#else
# error what machine?
#endif

#define VMO_NAME_UNKNOWN "<unknown ELF file>"
#define VMO_NAME_PREFIX_BSS "bss:"
#define VMO_NAME_PREFIX_DATA "data:"

// NOTE! All code in this file must maintain the invariants that it's
// purely position-independent and uses no writable memory other than
// its own stack.
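
// Illustrative usage of this API (a sketch only; error checking
// between calls is omitted, and `vmo`/`root_vmar` are assumed to be
// valid handles supplied by the caller):
//
//     elf_load_header_t header;
//     uintptr_t phoff;
//     zx_status_t status = elf_load_prepare(vmo, NULL, 0, &header, &phoff);
//     elf_phdr_t phdrs[header.e_phnum];
//     status = elf_load_read_phdrs(vmo, phdrs, phoff, header.e_phnum);
//     zx_vaddr_t base, entry;
//     status = elf_load_map_segments(root_vmar, &header, phdrs, vmo,
//                                    NULL, &base, &entry);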

// hdr_buf represents bytes already read from the start of the file.
zx_status_t elf_load_prepare(zx_handle_t vmo, const void* hdr_buf, size_t buf_sz,
                             elf_load_header_t* header, uintptr_t* phoff) {
    // Read the file header and validate basic format sanity.
    elf_ehdr_t ehdr;
    if (buf_sz >= sizeof(ehdr)) {
        memcpy(&ehdr, hdr_buf, sizeof(ehdr));
    } else {
        zx_status_t status = zx_vmo_read(vmo, &ehdr, 0, sizeof(ehdr));
        if (status != ZX_OK)
            return status;
    }
    if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
        ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
        ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
        ehdr.e_ident[EI_MAG3] != ELFMAG3 ||
        ehdr.e_ident[EI_CLASS] != MY_ELFCLASS ||
        ehdr.e_ident[EI_DATA] != MY_ELFDATA ||
        ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
        ehdr.e_phentsize != sizeof(elf_phdr_t) ||
        ehdr.e_phnum == PN_XNUM ||
        ehdr.e_machine != MY_MACHINE ||
        // This code could easily support loading fixed-address ELF files
        // (e_type == ET_EXEC). But the system overall doesn't support
        // them. It's Fuchsia policy that all executables must be PIEs.
        // So don't accept ET_EXEC files at all.
        ehdr.e_type != ET_DYN)
        return ERR_ELF_BAD_FORMAT;

    // Cache the few other bits we need from the header, and we're good to go.
    header->e_phnum = ehdr.e_phnum;
    header->e_entry = ehdr.e_entry;
    *phoff = ehdr.e_phoff;
    return ZX_OK;
}

zx_status_t elf_load_read_phdrs(zx_handle_t vmo, elf_phdr_t phdrs[],
                                uintptr_t phoff, size_t phnum) {
    size_t phdrs_size = (size_t)phnum * sizeof(elf_phdr_t);
    return zx_vmo_read(vmo, phdrs, phoff, phdrs_size);
}

// An ET_DYN file can be loaded anywhere, so choose where. This
// allocates a VMAR to hold the image, and returns its handle and
// absolute address. This also computes the "load bias", which is the
// difference between p_vaddr values in this file and actual runtime
// addresses. (Usually the lowest p_vaddr in an ET_DYN file will be 0
// and so the load bias is also the load base address, but ELF does
// not require that the lowest p_vaddr be 0.)
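// For example (illustrative numbers only): if the file's PT_LOAD
// segments span p_vaddr [0x1000, 0x6000) and the kernel places the
// VMAR at 0x40000000, the bias is 0x40000000 - 0x1000 = 0x3ffff000,
// so a p_vaddr of 0x1234 lands at runtime address 0x40000234.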
static zx_status_t choose_load_bias(zx_handle_t root_vmar,
                                    const elf_load_header_t* header,
                                    const elf_phdr_t phdrs[],
                                    zx_handle_t* vmar,
                                    uintptr_t* vmar_base,
                                    uintptr_t* bias) {
    // This file can be loaded anywhere, so the first thing is to
    // figure out the total span it will need and reserve a span
    // of address space that big. The kernel decides where to put it.

    uintptr_t low = 0, high = 0;
    for (uint_fast16_t i = 0; i < header->e_phnum; ++i) {
        if (phdrs[i].p_type == PT_LOAD) {
            uint_fast16_t j = header->e_phnum;
            do {
                --j;
            } while (j > i && phdrs[j].p_type != PT_LOAD);
            low = phdrs[i].p_vaddr & -PAGE_SIZE;
            high = ((phdrs[j].p_vaddr +
                     phdrs[j].p_memsz + PAGE_SIZE - 1) & -PAGE_SIZE);
            break;
        }
    }
    // Sanity check. ELF requires that PT_LOAD phdrs be sorted in
    // ascending p_vaddr order.
    if (low > high)
        return ERR_ELF_BAD_FORMAT;

    const size_t span = high - low;
    if (span == 0)
        return ZX_OK;
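    // (span == 0 means there were no nonempty PT_LOAD segments, so
    // there is nothing to reserve; *vmar is left untouched, still
    // ZX_HANDLE_INVALID as initialized by the caller.)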

    // Allocate a VMAR to reserve the whole address range.
    zx_status_t status = zx_vmar_allocate(root_vmar,
                                          ZX_VM_CAN_MAP_READ |
                                          ZX_VM_CAN_MAP_WRITE |
                                          ZX_VM_CAN_MAP_EXECUTE |
                                          ZX_VM_CAN_MAP_SPECIFIC,
                                          0, span, vmar, vmar_base);
    if (status == ZX_OK)
        *bias = *vmar_base - low;
    return status;
}

static zx_status_t finish_load_segment(
    zx_handle_t vmar, zx_handle_t vmo, const char vmo_name[ZX_MAX_NAME_LEN],
    const elf_phdr_t* ph, size_t start_offset, size_t size,
    uintptr_t file_start, uintptr_t file_end, size_t partial_page) {
    const zx_vm_option_t options = ZX_VM_SPECIFIC |
        ((ph->p_flags & PF_R) ? ZX_VM_PERM_READ : 0) |
        ((ph->p_flags & PF_W) ? ZX_VM_PERM_WRITE : 0) |
        ((ph->p_flags & PF_X) ? ZX_VM_PERM_EXECUTE : 0);

    uintptr_t start;
    if (ph->p_filesz == ph->p_memsz)
        // Straightforward segment, map all the whole pages from the file.
        return zx_vmar_map(vmar, options, start_offset, vmo, file_start, size,
                           &start);

    const size_t file_size = file_end - file_start;

    // This segment has some bss, so things are more complicated.
    // Only the leading portion is directly mapped in from the file.
    if (file_size > 0) {
        zx_status_t status = zx_vmar_map(vmar, options, start_offset, vmo,
                                         file_start, file_size, &start);
        if (status != ZX_OK)
            return status;

        start_offset += file_size;
        size -= file_size;
    }

    // The rest of the segment will be backed by anonymous memory.
    zx_handle_t bss_vmo;
    zx_status_t status = zx_vmo_create(size, 0, &bss_vmo);
    if (status != ZX_OK)
        return status;

    char bss_vmo_name[ZX_MAX_NAME_LEN] = VMO_NAME_PREFIX_BSS;
    memcpy(&bss_vmo_name[sizeof(VMO_NAME_PREFIX_BSS) - 1],
           vmo_name, ZX_MAX_NAME_LEN - sizeof(VMO_NAME_PREFIX_BSS));
    status = zx_object_set_property(bss_vmo, ZX_PROP_NAME,
                                    bss_vmo_name, strlen(bss_vmo_name));
    if (status != ZX_OK) {
        zx_handle_close(bss_vmo);
        return status;
    }

    // The final partial page of initialized data falls into the
    // region backed by bss_vmo rather than (the file) vmo. We need
    // to read that data out of the file and copy it into bss_vmo.
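    // For example (illustrative): if the initialized data ends 0x234
    // bytes into its final page, partial_page is 0x234; those bytes
    // are read from the file VMO at the page-rounded file_end offset
    // and copied to the start of bss_vmo, whose remaining contents
    // read as zero fill.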
    if (partial_page > 0) {
        char buffer[PAGE_SIZE];
        status = zx_vmo_read(vmo, buffer, file_end, partial_page);
        if (status != ZX_OK) {
            zx_handle_close(bss_vmo);
            return status;
        }
        status = zx_vmo_write(bss_vmo, buffer, 0, partial_page);
        if (status != ZX_OK) {
            zx_handle_close(bss_vmo);
            return status;
        }
    }

    status = zx_vmar_map(vmar, options, start_offset, bss_vmo, 0, size, &start);
    zx_handle_close(bss_vmo);

    return status;
}

static zx_status_t load_segment(zx_handle_t vmar, size_t vmar_offset,
                                zx_handle_t vmo, const char* vmo_name,
                                const elf_phdr_t* ph) {
    // The p_vaddr can start in the middle of a page, but the
    // semantics are that all the whole pages containing the
    // p_vaddr+p_memsz range are mapped in.
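    // For example (illustrative, 4096-byte pages): p_vaddr = 0x1010
    // with p_memsz = 0x20 occupies bytes [0x1010, 0x1030), so the
    // single whole page [0x1000, 0x2000) gets mapped.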
    size_t start = (size_t)ph->p_vaddr + vmar_offset;
    size_t end = start + ph->p_memsz;
    start &= -PAGE_SIZE;
    end = (end + PAGE_SIZE - 1) & -PAGE_SIZE;
    size_t size = end - start;

    // Nothing to do for an empty segment (degenerate case).
    if (size == 0)
        return ZX_OK;

    uintptr_t file_start = (uintptr_t)ph->p_offset;
    uintptr_t file_end = file_start + ph->p_filesz;
    const size_t partial_page = file_end & (PAGE_SIZE - 1);
    file_start &= -PAGE_SIZE;
    file_end &= -PAGE_SIZE;

    uintptr_t data_end =
        (ph->p_offset + ph->p_filesz + PAGE_SIZE - 1) & -PAGE_SIZE;
    const size_t data_size = data_end - file_start;

    // With no writable data, it's the simple case.
    if (!(ph->p_flags & PF_W) || data_size == 0)
        return finish_load_segment(vmar, vmo, vmo_name, ph, start, size,
                                   file_start, file_end, partial_page);

    // For a writable segment, we need a writable VMO.
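    // A copy-on-write clone lets this process write to its data pages
    // without modifying the original file VMO, which may be shared
    // with other processes.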
    zx_handle_t writable_vmo;
    zx_status_t status = zx_vmo_clone(vmo, ZX_VMO_CLONE_COPY_ON_WRITE,
                                      file_start, data_size, &writable_vmo);
    if (status == ZX_OK) {
        char name[ZX_MAX_NAME_LEN] = VMO_NAME_PREFIX_DATA;
        memcpy(&name[sizeof(VMO_NAME_PREFIX_DATA) - 1],
               vmo_name, ZX_MAX_NAME_LEN - sizeof(VMO_NAME_PREFIX_DATA));
        status = zx_object_set_property(writable_vmo, ZX_PROP_NAME,
                                        name, strlen(name));
        if (status == ZX_OK)
            status = finish_load_segment(
                vmar, writable_vmo, vmo_name, ph, start, size,
                0, file_end - file_start, partial_page);
        zx_handle_close(writable_vmo);
    }
    return status;
}

zx_status_t elf_load_map_segments(zx_handle_t root_vmar,
                                  const elf_load_header_t* header,
                                  const elf_phdr_t phdrs[],
                                  zx_handle_t vmo,
                                  zx_handle_t* segments_vmar,
                                  zx_vaddr_t* base, zx_vaddr_t* entry) {
    char vmo_name[ZX_MAX_NAME_LEN];
    if (zx_object_get_property(vmo, ZX_PROP_NAME,
                               vmo_name, sizeof(vmo_name)) != ZX_OK ||
        vmo_name[0] == '\0')
        memcpy(vmo_name, VMO_NAME_UNKNOWN, sizeof(VMO_NAME_UNKNOWN));

    uintptr_t vmar_base = 0;
    uintptr_t bias = 0;
    zx_handle_t vmar = ZX_HANDLE_INVALID;
    zx_status_t status = choose_load_bias(root_vmar, header, phdrs,
                                          &vmar, &vmar_base, &bias);

    size_t vmar_offset = bias - vmar_base;
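    // Note vmar_offset == bias - vmar_base == -low (mod 2^64), so for
    // each segment p_vaddr + vmar_offset is its offset within the new
    // VMAR, i.e. its distance above the image's lowest mapped page.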
    for (uint_fast16_t i = 0; status == ZX_OK && i < header->e_phnum; ++i) {
        if (phdrs[i].p_type == PT_LOAD)
            status = load_segment(vmar, vmar_offset, vmo, vmo_name, &phdrs[i]);
    }

    if (status == ZX_OK && segments_vmar != NULL)
        *segments_vmar = vmar;
    else
        zx_handle_close(vmar);

    if (status == ZX_OK) {
        if (base != NULL)
            *base = vmar_base;
        if (entry != NULL)
            *entry = header->e_entry != 0 ? header->e_entry + bias : 0;
    }
    return status;
}

bool elf_load_find_interp(const elf_phdr_t phdrs[], size_t phnum,
                          uintptr_t* interp_off, size_t* interp_len) {
    for (size_t i = 0; i < phnum; ++i) {
        if (phdrs[i].p_type == PT_INTERP) {
            *interp_off = phdrs[i].p_offset;
            *interp_len = phdrs[i].p_filesz;
            return true;
        }
    }
    return false;
}