/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <acrn_hv_defs.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/mmu.h>
#include <boot.h>
#include <reloc.h>
#include <efi_mmap.h>
#include <logmsg.h>
#include <asm/guest/ept.h>

/*
 * e820.c contains the e820-related operations, e.g. getting the memory info
 * the hypervisor needs for its MMU setup and hiding hypervisor memory from
 * the Service VM.
 */

static uint32_t hv_e820_entries_nr;
static uint64_t hv_e820_ram_size;
/* Describe the memory layout the hypervisor uses */
static struct e820_entry hv_e820[E820_MAX_ENTRIES];

#define DBG_LEVEL_E820	6U

/*
 * @brief Reserve some RAM, hide it from the Service VM, and return its start address.
 *
 * e820_alloc_memory() works at 4 KiB granularity, so size_arg is rounded up
 * to a page boundary inside the function.
 *
 * @param size_arg Amount of memory to be found and marked reserved
 * @param max_addr Maximum address below which memory is to be identified
 *
 * @pre hv_e820_entries_nr > 0U
 * @return base address of the reserved memory region
 */
uint64_t e820_alloc_memory(uint64_t size_arg, uint64_t max_addr)
{
	int32_t i;
	uint64_t size = round_page_up(size_arg);
	uint64_t ret = INVALID_HPA;
	struct e820_entry *entry, *new_entry;

	for (i = (int32_t)hv_e820_entries_nr - 1; i >= 0; i--) {
		entry = &hv_e820[i];
		uint64_t start, end, length;

		start = round_page_up(entry->baseaddr);
		end = round_page_down(entry->baseaddr + entry->length);
		length = (end > start) ? (end - start) : 0UL;

		if ((entry->type == E820_TYPE_RAM) && (length >= size) && ((start + size) <= max_addr)) {
			/* found an e820 entry of exactly the requested size */
			if (length == size) {
				entry->type = E820_TYPE_RESERVED;
				ret = start;
			} else {
				/*
				 * Found an entry with more available memory than
				 * requested (length > size). Reserve memory from it if:
				 * 1) hv_e820_entries_nr < E820_MAX_ENTRIES, and
				 * 2) the end of this entry is <= max_addr;
				 * in that case take memory from the end of this e820 entry.
				 */
				if ((hv_e820_entries_nr < E820_MAX_ENTRIES) && (end <= max_addr)) {
					new_entry = &hv_e820[hv_e820_entries_nr];
					new_entry->type = E820_TYPE_RESERVED;
					new_entry->baseaddr = end - size;
					new_entry->length = (entry->baseaddr + entry->length) - new_entry->baseaddr;
					/* Shrink the existing entry and the total available memory */
					entry->length -= new_entry->length;
					hv_e820_entries_nr++;

					ret = new_entry->baseaddr;
				}
			}

			if (ret != INVALID_HPA) {
				break;
			}
		}
	}

	if ((ret == INVALID_HPA) || (ret == 0UL)) {
		/* The current allocation algorithm searches downward from the
		 * highest available address below max_addr. ret == 0 means all
		 * memory is used up and the resource would have to be put at
		 * address 0, which is dangerous. It would also make the code
		 * logic very complicated, since memcpy_s() does not support
		 * copies to/from address 0.
		 */
		panic("Requested memory from E820 cannot be reserved!!");
	}

	return ret;
}
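
/*
 * Illustrative use (a sketch, not part of this file): carving a 2 MiB buffer
 * out of RAM below 4 GiB during early boot. The size is rounded up to a page
 * boundary internally, and the function panics instead of returning on failure:
 *
 *	uint64_t base = e820_alloc_memory(0x200000UL, 0x100000000UL);
 *	// base is 4 KiB aligned; hv_e820[] now carries the region as
 *	// E820_TYPE_RESERVED, so it stays hidden from the Service VM.
 */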

static void insert_e820_entry(uint32_t index, uint64_t addr, uint64_t length, uint64_t type)
{
	uint32_t i;

	hv_e820_entries_nr++;
	ASSERT(hv_e820_entries_nr <= E820_MAX_ENTRIES, "e820 entry overflow");

	for (i = hv_e820_entries_nr - 1; i > index; i--) {
		hv_e820[i] = hv_e820[i - 1];
	}

	hv_e820[index].baseaddr = addr;
	hv_e820[index].length = length;
	hv_e820[index].type = type;
}
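
/*
 * Worked example: with hv_e820[] = {A, B, C} sorted by base address,
 * insert_e820_entry(1U, addr, len, E820_TYPE_RAM) shifts B and C up one
 * slot and places the new entry between A and B, so the array stays
 * sorted as long as addr lies between A and B.
 */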

static void init_e820_from_efi_mmap(void)
{
	uint32_t i, e820_idx = 0U;
	const struct efi_memory_desc *efi_mmap_entry = get_efi_mmap_entry();

	for (i = 0U; i < get_efi_mmap_entries_count(); i++) {
		if (e820_idx >= E820_MAX_ENTRIES) {
			pr_err("Too many efi memmap entries !");
			break;
		}

		hv_e820[e820_idx].baseaddr = efi_mmap_entry[i].phys_addr;
		hv_e820[e820_idx].length = efi_mmap_entry[i].num_pages * PAGE_SIZE;

		/* The EFI Boot Services related regions need to be set to reserved and
		 * left untouched by the hypervisor, because at least the software modules
		 * below rely on them:
		 * 1. The EFI ESRT (EFI System Resource Table), which is used for UEFI firmware upgrade;
		 * 2. The image resource in the ACPI BGRT (Boot-time Graphics Resource Table),
		 *    which is used for the boot time logo.
		 */
		switch (efi_mmap_entry[i].type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if ((efi_mmap_entry[i].attribute & EFI_MEMORY_WB) != 0UL) {
				hv_e820[e820_idx].type = E820_TYPE_RAM;
			} else {
				hv_e820[e820_idx].type = E820_TYPE_RESERVED;
			}
			break;
		case EFI_UNUSABLE_MEMORY:
			hv_e820[e820_idx].type = E820_TYPE_UNUSABLE;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			hv_e820[e820_idx].type = E820_TYPE_ACPI_RECLAIM;
			break;
		case EFI_ACPI_MEMORY_NVS:
			hv_e820[e820_idx].type = E820_TYPE_ACPI_NVS;
			break;
		/* case EFI_RESERVED_MEMORYTYPE:
		 * case EFI_BOOT_SERVICES_CODE:
		 * case EFI_BOOT_SERVICES_DATA:
		 * case EFI_RUNTIME_SERVICES_CODE:
		 * case EFI_RUNTIME_SERVICES_DATA:
		 * case EFI_MEMORYMAPPED_IO:
		 * case EFI_MEMORYMAPPED_IOPORTSPACE:
		 * case EFI_PALCODE:
		 * case EFI_PERSISTENT_MEMORY:
		 */
		default:
			hv_e820[e820_idx].type = E820_TYPE_RESERVED;
			break;
		}

		/* Since the EFI memmap is sorted, hv_e820[] is sorted as well. The
		 * merge algorithm is then very simple: merge with the previous entry
		 * if the types are the same and the regions are contiguous.
		 */
		if ((e820_idx > 0U) && (hv_e820[e820_idx].type == hv_e820[e820_idx - 1U].type)
				&& (hv_e820[e820_idx].baseaddr ==
					(hv_e820[e820_idx - 1U].baseaddr
					+ hv_e820[e820_idx - 1U].length))) {
			hv_e820[e820_idx - 1U].length += hv_e820[e820_idx].length;
		} else {
			dev_dbg(DBG_LEVEL_E820, "efi mmap hv_e820[%d]: type: 0x%x Base: 0x%016lx length: 0x%016lx",
			    e820_idx, hv_e820[e820_idx].type, hv_e820[e820_idx].baseaddr, hv_e820[e820_idx].length);
			e820_idx++;
		}
	}

	hv_e820_entries_nr = e820_idx;
}
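
/*
 * Merge example (hypothetical numbers): two write-back EFI_CONVENTIONAL_MEMORY
 * descriptors covering 0x1000-0x2000 and 0x2000-0x5000 both map to
 * E820_TYPE_RAM and are contiguous, so they collapse into a single hv_e820
 * entry with baseaddr 0x1000 and length 0x4000.
 */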

/* HV reads the multiboot memory map to get the e820 entries info */
static void init_e820_from_mmap(struct acrn_boot_info *abi)
{
	uint32_t i;

	struct abi_mmap *mmap = abi->mmap_entry;

	hv_e820_entries_nr = abi->mmap_entries;

	dev_dbg(DBG_LEVEL_E820, "mmap addr 0x%x entries %d\n",
		abi->mmap_entry, hv_e820_entries_nr);

	for (i = 0U; i < hv_e820_entries_nr; i++) {
		hv_e820[i].baseaddr = mmap[i].baseaddr;
		hv_e820[i].length = mmap[i].length;
		hv_e820[i].type = mmap[i].type;

		dev_dbg(DBG_LEVEL_E820, "mmap hv_e820[%d]: type: 0x%x Base: 0x%016lx length: 0x%016lx", i,
			mmap[i].type, mmap[i].baseaddr, mmap[i].length);
	}
}

static void calculate_e820_ram_size(void)
{
	uint32_t i;

	for (i = 0U; i < hv_e820_entries_nr; i++) {
		dev_dbg(DBG_LEVEL_E820, "hv_e820[%d]: type: 0x%x Base: 0x%016lx length: 0x%016lx", i,
				hv_e820[i].type, hv_e820[i].baseaddr, hv_e820[i].length);

		if (hv_e820[i].type == E820_TYPE_RAM) {
			/* Sum only the entry lengths; including baseaddr would
			 * over-count RAM that does not start at address 0.
			 */
			hv_e820_ram_size += hv_e820[i].length;
		}
	}

	dev_dbg(DBG_LEVEL_E820, "ram size: 0x%016lx ", hv_e820_ram_size);
}
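
/*
 * Worked example (hypothetical values): with one RAM entry of length
 * 0x8000000 at base 0x100000 and another of length 0x100000000 at base
 * 0x100000000, hv_e820_ram_size ends up as 0x8000000 + 0x100000000 =
 * 0x108000000; base addresses do not contribute.
 */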

static void reserve_e820_region(uint64_t start_hpa, uint64_t end_hpa)
{
	uint32_t e820_index;
	uint64_t entry_start, entry_end;

	for (e820_index = 0U; e820_index < hv_e820_entries_nr; e820_index++) {
		entry_start = hv_e820[e820_index].baseaddr;
		entry_end = hv_e820[e820_index].baseaddr + hv_e820[e820_index].length;

		/* Nothing needs to be done in these cases */
		if ((hv_e820[e820_index].type != E820_TYPE_RAM) || (entry_end <= start_hpa) || (entry_start >= end_hpa)) {
			continue;
		}

		if ((entry_start <= start_hpa) && (entry_end >= end_hpa)) {
			hv_e820[e820_index].length = start_hpa - entry_start;

			if (end_hpa < entry_end) {
				/*
				 * .......|start_hpa......................end_hpa|.....
				 * |entry_start..............................entry_end|
				 */
				insert_e820_entry(e820_index + 1U, end_hpa, entry_end - end_hpa, E820_TYPE_RAM);
			}
		} else {
			panic("%s: region 0x%016lx-0x%016lx crosses multiple e820 entries, check your bootloader!",
					__func__, entry_start, entry_end);
		}
	}
}
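
/*
 * Split example (hypothetical addresses): reserving 0x200000-0x400000 out of
 * a RAM entry covering 0x100000-0x800000 shrinks that entry to
 * 0x100000-0x200000 and inserts a new RAM entry for 0x400000-0x800000; the
 * reserved hole is simply dropped from hv_e820[], hiding it from the Service VM.
 */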

static void alloc_hv_memory(void)
{
	uint64_t hv_start = hva2hpa((void *)(get_hv_image_base()));
	uint64_t hv_end = hv_start + get_hv_image_size();

	dev_dbg(DBG_LEVEL_E820, "%s: hv start: 0x%016lx, end: 0x%016lx", __func__, hv_start, hv_end);

	reserve_e820_region(hv_start, hv_end);
}

static void alloc_mods_memory(void)
{
	uint32_t mod_index, e820_index, target_index;
	uint64_t mod_start, mod_end;
	uint64_t entry_start, entry_end;
	struct acrn_boot_info *abi = get_acrn_boot_info();

	/* 1st pass: remove the exact module regions from the map */
	for (mod_index = 0U; mod_index < abi->mods_count; mod_index++) {
		mod_start = hva2hpa(abi->mods[mod_index].start);
		mod_end = mod_start + abi->mods[mod_index].size;

		dev_dbg(DBG_LEVEL_E820, "%s: mod %d, start: 0x%016lx, end: 0x%016lx",
				__func__, mod_index, mod_start, mod_end);

		reserve_e820_region(mod_start, mod_end);
	}

	/* 2nd pass: shrink the entries to page boundaries and drop empty ones */
	target_index = 0U;
	for (e820_index = 0U; e820_index < hv_e820_entries_nr; e820_index++) {
		entry_start = round_page_up(hv_e820[e820_index].baseaddr);
		entry_end = round_page_down(hv_e820[e820_index].baseaddr + hv_e820[e820_index].length);

		if (entry_start < entry_end) {
			hv_e820[target_index].baseaddr = entry_start;
			hv_e820[target_index].length = entry_end - entry_start;
			hv_e820[target_index].type = hv_e820[e820_index].type;
			target_index++;
		}
	}
	(void)memset(&hv_e820[target_index], 0, (hv_e820_entries_nr - target_index) * sizeof(struct e820_entry));
	hv_e820_entries_nr = target_index;
}
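
/*
 * Example of the 2nd pass (hypothetical values): an entry covering
 * 0x1000-0x1800 rounds to the empty range 0x1000-0x1000 and is dropped,
 * while an entry covering 0x1800-0x5000 is shrunk to the page-aligned
 * 0x2000-0x5000 and compacted down to slot target_index.
 */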


void init_e820(void)
{
	struct acrn_boot_info *abi = get_acrn_boot_info();

	if (boot_from_uefi(abi)) {
		init_efi_mmap_entries(&abi->uefi_info);
		init_e820_from_efi_mmap();
	} else {
		init_e820_from_mmap(abi);
	}

	calculate_e820_ram_size();
	alloc_hv_memory();
	/* reserve multiboot modules memory */
	alloc_mods_memory();
}

uint64_t get_e820_ram_size(void)
{
	return hv_e820_ram_size;
}

uint32_t get_e820_entries_count(void)
{
	return hv_e820_entries_nr;
}

const struct e820_entry *get_e820_entry(void)
{
	return hv_e820;
}
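
/*
 * Illustrative consumer (a sketch; assumes pr_info() from logmsg.h): walking
 * the final map after init_e820() has run:
 *
 *	const struct e820_entry *map = get_e820_entry();
 *	uint32_t i;
 *
 *	for (i = 0U; i < get_e820_entries_count(); i++) {
 *		pr_info("e820[%u]: base 0x%lx, len 0x%lx, type %u",
 *			i, map[i].baseaddr, map[i].length, map[i].type);
 *	}
 */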