/*
 * Copyright (C) 2021-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm/guest/vm.h>
#include <asm/guest/ept.h>
#include <asm/mmu.h>
#include <vboot.h>
#include <elf.h>
#include <logmsg.h>
#include <vacpi.h>

/* Define a memory block to store ELF format VM load params in the guest address space.
 * The params include:
 *	MISC info: 1KB
 *		including: Init GDT (40 bytes), ACRN ELF loader name (20 bytes), ACPI RSDP table (36 bytes).
 *	Multiboot info: 4KB
 *	Boot cmdline: 2KB
 *	Memory map: 20KB (enough to hold the memory entries for multiboot 0.6.96 or multiboot 2.0)
 * Each param should be kept 8-byte aligned and the total region should fit below MEM_1M.
 * The total params size is:
 * (MEM_1K + MEM_4K + MEM_2K + 20K) = 27KB
 */

struct elf_boot_para {
	char init_gdt[40];
	char loader_name[20];
	struct acpi_table_rsdp rsdp;
	struct multiboot_info mb_info;
	char cmdline[MEM_2K];
	char mmap[MEM_4K * 5U];
} __aligned(8);

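/**
 * @pre vm != NULL
 * Copy the guest boot cmdline into the cmdline slot of the ELF boot
 * param region at param_cmd_gpa.
 */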
int32_t prepare_elf_cmdline(struct acrn_vm *vm, uint64_t param_cmd_gpa)
{
	return copy_to_gpa(vm, vm->sw.bootargs_info.src_addr, param_cmd_gpa,
			   vm->sw.bootargs_info.size);
}

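/**
 * @pre vm != NULL
 * Build multiboot 0.6.96 mmap entries from the guest E820 table at
 * param_mmap_gpa. Returns the total bytes written, or 0U on copy failure.
 */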
uint32_t prepare_multiboot_mmap(struct acrn_vm *vm, uint64_t param_mmap_gpa)
{
	uint32_t i, mmap_length = 0U;
	struct multiboot_mmap mmap_entry;
	uint64_t mmap_gpa = param_mmap_gpa;

	for (i = 0U; i < vm->e820_entry_num; i++) {
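		/* Per multiboot 0.6.96, 'size' is the entry size excluding the
		 * size field itself: baseaddr(8) + length(8) + type(4) = 20 bytes.
		 */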
		mmap_entry.size = 20U;
		mmap_entry.baseaddr = vm->e820_entries[i].baseaddr;
		mmap_entry.length = vm->e820_entries[i].length;
		mmap_entry.type = vm->e820_entries[i].type;
		if (mmap_entry.type > MULTIBOOT_MEMORY_BADRAM) {
			mmap_entry.type = MULTIBOOT_MEMORY_RESERVED;
		}

		if (copy_to_gpa(vm, &mmap_entry, mmap_gpa,
			sizeof(struct multiboot_mmap)) != 0U) {
			mmap_length = 0U;
			break;
		}
		mmap_gpa += sizeof(struct multiboot_mmap);
		mmap_length += sizeof(struct multiboot_mmap);
	}

	return mmap_length;
}

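/**
 * @pre vm != NULL
 * Copy the fixed loader name into the loader_name slot so the guest can
 * report which boot loader started it.
 */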
int32_t prepare_loader_name(struct acrn_vm *vm, uint64_t param_ldrname_gpa)
{
	char loader_name[MAX_LOADER_NAME_SIZE] = "ACRN ELF LOADER";

	return (copy_to_gpa(vm, (void *)loader_name, param_ldrname_gpa,
		MAX_LOADER_NAME_SIZE));
}


/**
 * @pre vm != NULL
 * must run in stac/clac context
 */
static void *do_load_elf64(struct acrn_vm *vm)
{
	struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
	void *p_elf_img = (void *)sw_kernel->kernel_src_addr;
	struct elf64_hdr *p_elf_header64 = (struct elf64_hdr *)p_elf_img;
	struct elf64_prog_entry *p_prg_tbl_head64;
	struct elf64_sec_entry *p_sec_tbl_head64, *p_shstr_tbl_head64;
	const char *p_shstr_tbl, *p_sec_name;
	void *elf_entry = NULL, *p_elf_bss = NULL;
	uint32_t i;

	/* Currently only ET_EXEC is supported */
	if (p_elf_header64->e_type == ET_EXEC) {
		p_prg_tbl_head64 = (struct elf64_prog_entry *)(p_elf_img + p_elf_header64->e_phoff);
		/* Prepare program entries */
		for (i = 0U; i < p_elf_header64->e_phnum; i++) {
			/**
			 * Only the PT_LOAD type is supported for now; such segments are
			 * copied from the image to guest RAM.
			 * TODO: more program types may be needed here.
			 */
			if (p_prg_tbl_head64->p_type == PT_LOAD) {
				/**
				 * copy_to_gpa checks whether the gpa is mapped in EPT and
				 * prints a message if anything is wrong.
				 * However, the guest OS may still fail to boot if it loads
				 * segments to an invalid gpa, such as the ACPI area defined
				 * in ve820.
				 *
				 * We assume the guest elf puts its segments at valid gpa.
				 */
				(void)copy_to_gpa(vm, p_elf_img + p_prg_tbl_head64->p_offset,
					p_prg_tbl_head64->p_paddr, (uint32_t)p_prg_tbl_head64->p_filesz);
				/* copy_to_gpa has its own stac/clac inside. Call stac again here
				 * to keep the context. */
				stac();
			}
			p_prg_tbl_head64++;
		}

		/* Find and clear bss sections */
		p_sec_tbl_head64 = (struct elf64_sec_entry *)(p_elf_img + p_elf_header64->e_shoff);
		p_shstr_tbl_head64 = p_sec_tbl_head64 + p_elf_header64->e_shstrndx;
		p_shstr_tbl = (char *)(p_elf_img + p_shstr_tbl_head64->sh_offset);
		/* Relocation sections (sh_type SHT_REL or SHT_RELA) are not supported;
		 * assume the guest elf has none. */
		for (i = 0U; i < p_elf_header64->e_shnum; i++) {
			/* A section entry's name is an offset; the real string is in the
			 * string table. */
			p_sec_name = p_shstr_tbl + p_sec_tbl_head64->sh_name;
			if ((strncmp(p_sec_name, "bss", 3) == 0) || (strncmp(p_sec_name, ".bss", 4) == 0)) {
				p_elf_bss = gpa2hva(vm, p_sec_tbl_head64->sh_addr);
				(void)memset(p_elf_bss, 0U, p_sec_tbl_head64->sh_size);
			}
			p_sec_tbl_head64++;
		}

		elf_entry = (void *)p_elf_header64->e_entry;
	} else {
		pr_err("%s, elf type(%x) not supported!", __func__, p_elf_header64->e_type);
	}
	/* For a 64-bit elf, an entry address above 4G is not currently supported;
	 * assume it is below 4G. */
	return elf_entry;
}

/**
 * @pre vm != NULL
 * must run in stac/clac context
 */
static void *do_load_elf32(struct acrn_vm *vm)
{
	struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
	void *p_elf_img = (void *)sw_kernel->kernel_src_addr;
	struct elf32_hdr *p_elf_header32 = (struct elf32_hdr *)p_elf_img;
	struct elf32_prog_entry *p_prg_tbl_head32;
	struct elf32_sec_entry *p_sec_tbl_head32, *p_shstr_tbl_head32;
	const char *p_shstr_tbl, *p_sec_name;
	void *elf_entry = NULL, *p_elf_bss = NULL;
	uint32_t i;

	/* Currently only ET_EXEC is supported */
	if (p_elf_header32->e_type == ET_EXEC) {
		p_prg_tbl_head32 = (struct elf32_prog_entry *)(p_elf_img + p_elf_header32->e_phoff);
		/* Copy program entries */
		for (i = 0U; i < p_elf_header32->e_phnum; i++) {
			/**
			 * Only the PT_LOAD type is supported for now; such segments are
			 * copied from the image to guest RAM.
			 * TODO: more program types may be needed here.
			 */
			if (p_prg_tbl_head32->p_type == PT_LOAD) {
				/**
				 * copy_to_gpa checks whether the gpa is mapped in EPT and
				 * prints a message if anything is wrong.
				 * However, the guest OS may still fail to boot if it loads
				 * segments to an invalid gpa, such as the ACPI area defined
				 * in ve820.
				 *
				 * We assume the guest elf puts its segments at valid gpa.
				 */
				(void)copy_to_gpa(vm, p_elf_img + p_prg_tbl_head32->p_offset,
					p_prg_tbl_head32->p_paddr, p_prg_tbl_head32->p_filesz);
				/* copy_to_gpa has its own stac/clac inside. Call stac again here
				 * to keep the context. */
				stac();
			}
			p_prg_tbl_head32++;
		}

		/* Find and clear bss sections */
		p_sec_tbl_head32 = (struct elf32_sec_entry *)(p_elf_img + p_elf_header32->e_shoff);
		p_shstr_tbl_head32 = p_sec_tbl_head32 + p_elf_header32->e_shstrndx;
		p_shstr_tbl = (char *)(p_elf_img + p_shstr_tbl_head32->sh_offset);
		/* Relocation sections (sh_type SHT_REL or SHT_RELA) are not supported;
		 * assume the guest elf has none. */
		for (i = 0U; i < p_elf_header32->e_shnum; i++) {
			/* A section entry's name is an offset; the real string is in the
			 * string table. */
			p_sec_name = p_shstr_tbl + p_sec_tbl_head32->sh_name;
			if ((strncmp(p_sec_name, "bss", 3) == 0) || (strncmp(p_sec_name, ".bss", 4) == 0)) {
				p_elf_bss = gpa2hva(vm, p_sec_tbl_head32->sh_addr);
				(void)memset(p_elf_bss, 0U, p_sec_tbl_head32->sh_size);
			}
			p_sec_tbl_head32++;
		}

		elf_entry = (void *)(uint64_t)p_elf_header32->e_entry;
	} else {
		pr_err("%s, elf type(%x) not supported!", __func__, p_elf_header32->e_type);
	}

	return elf_entry;
}

/**
 * @pre vm != NULL
 */
static int32_t load_elf(struct acrn_vm *vm)
{
	void *elf_entry = NULL;
	struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
	void *p_elf_img = (void *)sw_kernel->kernel_src_addr;
	int32_t ret = 0;

	stac();

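	/* ELF identification: the first 4 bytes of e_ident hold the magic
	 * (0x7f 'E' 'L' 'F'); the EI_CLASS byte then selects between the
	 * 32-bit and 64-bit image layouts.
	 */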
	if (*(uint32_t *)p_elf_img == ELFMAGIC) {
		if (*(uint8_t *)(p_elf_img + EI_CLASS) == ELFCLASS64) {
			elf_entry = do_load_elf64(vm);
		} else if (*(uint8_t *)(p_elf_img + EI_CLASS) == ELFCLASS32) {
			elf_entry = do_load_elf32(vm);
		} else {
			pr_err("%s, unsupported elf class(%d)", __func__, *(uint8_t *)(p_elf_img + EI_CLASS));
		}
	} else {
		pr_err("%s, booting elf but no elf header found!", __func__);
	}

	clac();

	sw_kernel->kernel_entry_addr = elf_entry;

	if (elf_entry == NULL) {
		ret = -EFAULT;
	}

	return ret;
}

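/**
 * @pre vm != NULL
 * must run in stac/clac context
 */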
struct multiboot_header *find_img_multiboot_header(struct acrn_vm *vm)
{
	uint16_t i, j;
	struct multiboot_header *ret = NULL;
	uint32_t *p = (uint32_t *)vm->sw.kernel_info.kernel_src_addr;

	/* Scan the first 8k to detect whether the elf needs multiboot info prepared. */
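	/* The loop bound reserves the last three u32 slots so the magic/flags/
	 * checksum triple is always read inside the 8k window. */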
	for (i = 0U; i <= (((MEM_4K * 2U) / sizeof(uint32_t)) - 3U); i++) {
		if (p[i] == MULTIBOOT_HEADER_MAGIC) {
			uint32_t sum = 0U;

			/* According to multiboot spec 0.6.96 sec 3.1.2,
			 * there are three u32 fields:
			 *  offset   field
			 *    0      multiboot_header_magic
			 *    4      flags
			 *    8      checksum
			 * The u32 sum of these three fields must be zero.
			 */
			for (j = 0U; j < 3U; j++) {
				sum += p[j + i];
			}

			if (0U == sum) {
				ret = (struct multiboot_header *)(p + i);
				break;
			}
		}
	}
	return ret;
}

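/**
 * @pre vm != NULL
 */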
int32_t elf_loader(struct acrn_vm *vm)
{
	int32_t ret = 0;
	struct multiboot_header *mb_hdr;
	/* Get primary vcpu */
	struct acrn_vcpu *vcpu = vcpu_from_vid(vm, BSP_CPU_ID);
	/*
	 * Assume the guest elf does not load content into GPA space below
	 * VIRT_RSDP_ADDR, and that the guest load addresses fall within the
	 * space prepared in ve820.c. In the future, each ELF load entry
	 * should be checked against ve820 if relocation is not supported.
	 */
	uint64_t load_params_gpa = find_space_from_ve820(vm, sizeof(struct elf_boot_para),
				   MEM_4K, VIRT_RSDP_ADDR);

	if (load_params_gpa != INVALID_GPA) {
		/* We boot the ELF image directly in protected mode */
		init_vcpu_protect_mode_regs(vcpu, load_params_gpa +
					    offsetof(struct elf_boot_para, init_gdt));
		stac();
		mb_hdr = find_img_multiboot_header(vm);
		clac();
		if (mb_hdr != NULL) {
			uint32_t mmap_length = 0U;
			/* Zero-initialize: mi_flags accumulates via |= and the whole
			 * struct is later copied to the guest. */
			struct multiboot_info mb_info = {0U};

			stac();
			if ((mb_hdr->flags & MULTIBOOT_HEADER_NEED_MEMINFO) != 0U) {
				mmap_length = prepare_multiboot_mmap(vm, load_params_gpa +
						offsetof(struct elf_boot_para, mmap));
			}

			if (mmap_length != 0U) {
				mb_info.mi_flags |= MULTIBOOT_INFO_HAS_MMAP;
				mb_info.mi_mmap_addr = (uint32_t)(load_params_gpa +
						offsetof(struct elf_boot_para, mmap));
				mb_info.mi_mmap_length = mmap_length;
			}
			ret = prepare_elf_cmdline(vm, load_params_gpa +
						offsetof(struct elf_boot_para, cmdline));
			if (ret == 0) {
				mb_info.mi_flags |= MULTIBOOT_INFO_HAS_CMDLINE;
				mb_info.mi_cmdline = load_params_gpa +
						offsetof(struct elf_boot_para, cmdline);
				ret = prepare_loader_name(vm, load_params_gpa +
						offsetof(struct elf_boot_para, loader_name));
			}

			if (ret == 0) {
				mb_info.mi_flags |= MULTIBOOT_INFO_HAS_LOADER_NAME;
				mb_info.mi_loader_name = load_params_gpa +
						offsetof(struct elf_boot_para, loader_name);
				ret = copy_to_gpa(vm, (void *)&mb_info, load_params_gpa +
						offsetof(struct elf_boot_para, mb_info),
						sizeof(struct multiboot_info));
			}

			if (ret == 0) {
				vcpu_set_gpreg(vcpu, CPU_REG_RAX, MULTIBOOT_INFO_MAGIC);
				vcpu_set_gpreg(vcpu, CPU_REG_RBX, load_params_gpa +
						offsetof(struct elf_boot_para, mb_info));
				/* The other vcpu regs already satisfy the multiboot
				 * requirements. */
			}
			clac();
		}
		/*
		 * elf_loader also needs to support images without a
		 * multiboot header.
		 */
		if (ret == 0) {
			ret = load_elf(vm);
		}
	}
	return ret;
}