/*
 * Copyright (C) 2019-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6 
7 #include <asm/e820.h>
8 #include <asm/mmu.h>
9 #include <asm/guest/vm.h>
10 #include <asm/guest/ept.h>
11 #include <reloc.h>
12 #include <vacpi.h>
13 #include <logmsg.h>
14 #include <asm/rtcm.h>
15 #include <ptdev.h>
16 
/* Index of the low-memory (hpa1_low) entry in pre_ve820_template */
#define ENTRY_GPA_L		2U
/* Index at which RAM entries above 4G are appended to a pre-launched VM's e820 */
#define ENTRY_GPA_HI		8U

/* Backing storage for per-VM virtual e820 tables:
 * one table for the Service VM, one per pre-launched VM. */
static struct e820_entry service_vm_e820[E820_MAX_ENTRIES];
static struct e820_entry pre_vm_e820[PRE_VM_NUM][E820_MAX_ENTRIES];
22 
/**
 * Find the highest page-aligned RAM region in the VM's e820 table that can
 * hold 'size' bytes within [min_addr, max_addr).
 *
 * The e820 table is scanned from the last entry downwards; since the table
 * is kept sorted by base address, the first match yields the highest
 * suitable address.
 *
 * @param vm       VM whose e820 table is searched.
 * @param size     Requested size in bytes; rounded up to a page multiple.
 * @param min_addr Lowest acceptable guest physical address.
 * @param max_addr Upper (exclusive) bound of the acceptable range.
 *
 * @return Base GPA of the found space, or INVALID_GPA if none fits.
 *
 * @pre vm != NULL
 */
uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_addr, uint64_t max_addr)
{
	int32_t i;
	uint64_t gpa = INVALID_GPA;
	uint64_t round_min_addr = round_page_up(min_addr);
	uint64_t round_max_addr = round_page_down(max_addr);
	/* Keep the rounded size in 64 bits: round_page_up() of a size just
	 * below 4G yields 0x100000000UL, which would truncate to 0 if stored
	 * in a uint32_t and defeat the length check below. */
	uint64_t round_size = round_page_up((uint64_t)size);

	/* Walk backwards so the highest suitable region wins */
	for (i = (int32_t)(vm->e820_entry_num - 1U); i >= 0; i--) {
		struct e820_entry *entry = vm->e820_entries + i;
		uint64_t start, end, length;

		/* Shrink the entry to its page-aligned interior */
		start = round_page_up(entry->baseaddr);
		end = round_page_down(entry->baseaddr + entry->length);
		length = (end > start) ? (end - start) : 0UL;

		if ((entry->type == E820_TYPE_RAM) && (length >= round_size)
				&& (end > round_min_addr) && (start < round_max_addr)) {
			if (((start >= min_addr) && ((start + round_size) <= min(end, round_max_addr)))
				|| ((start < min_addr) && ((min_addr + round_size) <= min(end, round_max_addr)))) {
				/* Allocate at the top of the usable window */
				gpa = (end > round_max_addr) ? (round_max_addr - round_size) : (end - round_size);
				break;
			}
		}
	}
	return gpa;
}
50 
51 /* a sorted VM e820 table is critical for memory allocation or slot location,
52  * for example, put ramdisk at the end of TOLUD(Top of LOW Usable DRAM) and
53  * put kernel at its begining so that provide largest load capicity for them.
54  */
sort_vm_e820(struct acrn_vm * vm)55 static void sort_vm_e820(struct acrn_vm *vm)
56 {
57 	uint32_t i,j;
58 	struct e820_entry tmp_entry;
59 
60 	/* Bubble sort */
61 	for (i = 0U; i < (vm->e820_entry_num - 1U); i++) {
62 		for (j = 0U; j < (vm->e820_entry_num - i - 1U); j++) {
63 			if (vm->e820_entries[j].baseaddr > vm->e820_entries[j + 1U].baseaddr) {
64 				tmp_entry = vm->e820_entries[j];
65 				vm->e820_entries[j] = vm->e820_entries[j + 1U];
66 				vm->e820_entries[j + 1U] = tmp_entry;
67 			}
68 		}
69 	}
70 }
71 
/* Remove the host range [start_pa, end_pa) from the Service VM's e820 table
 * (service_vm_e820), so the Service VM never sees memory owned by the
 * hypervisor or by pre-launched VMs.
 *
 * Four overlap cases are handled per RAM entry:
 *  - range cuts the tail of an entry: shrink the entry;
 *  - range lies strictly inside an entry: shrink the entry and stage the
 *    remainder above end_pa in 'new_entry' (appended after the loop);
 *  - entry fully inside the range: mark it E820_TYPE_RESERVED;
 *  - range cuts the head of an entry: move the entry's base up.
 *
 * NOTE(review): only one staged split ('new_entry') is recorded per call, so
 * if a single call split two entries the first remainder would be lost —
 * presumably callers pass ranges that strictly contain at most one entry
 * interior. TODO: confirm against callers.
 */
static void filter_mem_from_service_vm_e820(struct acrn_vm *vm, uint64_t start_pa, uint64_t end_pa)
{
	uint32_t i;
	uint64_t entry_start;
	uint64_t entry_end;
	uint32_t entries_count = vm->e820_entry_num;
	struct e820_entry *entry, new_entry = {0};

	for (i = 0U; i < entries_count; i++) {
		entry = &service_vm_e820[i];
		entry_start = entry->baseaddr;
		entry_end = entry->baseaddr + entry->length;

		/* Non-RAM entries and entries with no overlap need no handling */
		if ((entry->type != E820_TYPE_RAM) || (entry_end <= start_pa) || (entry_start >= end_pa)) {
			continue;
		}

		/* Range overlaps the tail of this entry: trim its length */
		if ((entry_start < start_pa) && (entry_end <= end_pa)) {
			entry->length = start_pa - entry_start;
			continue;
		}

		/* Range lies strictly inside this entry: trim it and stage a
		 * new RAM entry for the part above end_pa */
		if ((entry_start < start_pa) && (entry_end > end_pa)) {
			entry->length = start_pa - entry_start;
			new_entry.baseaddr = end_pa;
			new_entry.length = entry_end - end_pa;
			new_entry.type = E820_TYPE_RAM;
			continue;
		}

		/* This entry is entirely within the filtered range:
		 * change it to E820_TYPE_RESERVED */
		if ((entry_start >= start_pa) && (entry_end <= end_pa)) {
			entry->type = E820_TYPE_RESERVED;
			continue;
		}

		/* Range overlaps the head of this entry: move its base up */
		if ((entry_start >= start_pa) && (entry_start < end_pa) && (entry_end > end_pa)) {
			entry->baseaddr = end_pa;
			entry->length = entry_end - end_pa;
			continue;
		}
	}

	/* Append the staged split remainder, if any */
	if (new_entry.length > 0UL) {
		entries_count++;
		ASSERT(entries_count <= E820_MAX_ENTRIES, "e820 entry overflow");
		entry = &service_vm_e820[entries_count - 1U];
		entry->baseaddr = new_entry.baseaddr;
		entry->length = new_entry.length;
		entry->type = new_entry.type;
		vm->e820_entry_num = entries_count;
	}

}
131 
132 /**
133  * before boot Service VM, call it to hide HV and prelaunched VM memory in e820 table from Service VM
134  *
135  * @pre vm != NULL
136  */
create_service_vm_e820(struct acrn_vm * vm)137 void create_service_vm_e820(struct acrn_vm *vm)
138 {
139 	uint16_t vm_id;
140 	uint32_t i;
141 	uint32_t entries_count = get_e820_entries_count();
142 	struct acrn_vm_config *service_vm_config = get_vm_config(vm->vm_id);
143 
144 	(void)memcpy_s((void *)service_vm_e820, entries_count * sizeof(struct e820_entry),
145 			(const void *)get_e820_entry(), entries_count * sizeof(struct e820_entry));
146 
147 	vm->e820_entry_num = entries_count;
148 	vm->e820_entries = service_vm_e820;
149 
150 	/* filter out prelaunched vm memory from e820 table */
151 	for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
152 		struct acrn_vm_config *vm_config = get_vm_config(vm_id);
153 
154 		if (vm_config->load_order == PRE_LAUNCHED_VM) {
155 			for (i = 0; i < vm_config->memory.region_num; i++) {
156 				filter_mem_from_service_vm_e820(vm, vm_config->memory.host_regions[i].start_hpa,
157 						vm_config->memory.host_regions[i].start_hpa
158 						+ vm_config->memory.host_regions[i].size_hpa);
159 			}
160 		}
161 	}
162 
163 	for (i = 0U; i < vm->e820_entry_num; i++) {
164 		struct e820_entry *entry = &service_vm_e820[i];
165 		if (entry->type == E820_TYPE_RAM) {
166 			service_vm_config->memory.size += entry->length;
167 		}
168 	}
169 	sort_vm_e820(vm);
170 }
171 
/* Fixed below-4G ve820 layout shared by all pre-launched VMs.
 * Entries above index ENTRY_GPA_HI are appended per-VM by
 * create_prelaunched_vm_e820() when guest memory exceeds the low window. */
static const struct e820_entry pre_ve820_template[E820_MAX_ENTRIES] = {
	{	/* usable RAM under 1MB */
		.baseaddr = 0x0UL,
		.length   = 0xA0000UL,		/* 640KB */
		.type     = E820_TYPE_RAM
	},
	{	/* Video/BIOS extensions */
		.baseaddr = 0xA0000UL,
		.length   = 0x60000UL,		/* 384KB */
		.type     = E820_TYPE_RESERVED
	},
	{	/* hpa1_low: main low-memory RAM, see ENTRY_GPA_L */
		.baseaddr = MEM_1M,		/* 1MB */
		.length   = PRE_RTVM_SW_SRAM_BASE_GPA - MEM_1M,
		.type     = E820_TYPE_RAM
	},
	{	/* Software SRAM */
		.baseaddr = PRE_RTVM_SW_SRAM_BASE_GPA,
		.length   = PRE_RTVM_SW_SRAM_MAX_SIZE,
		.type     = E820_TYPE_RESERVED
	},
	{	/* GPU OpRegion for pre-launched VM */
		.baseaddr = GPU_OPREGION_GPA,
		.length   = GPU_OPREGION_SIZE,
		.type     = E820_TYPE_RESERVED
	},
	{	/* ACPI Reclaim */
		.baseaddr = VIRT_ACPI_DATA_ADDR,/* consecutive from 0x7fe00000UL */
		.length   = VIRT_ACPI_DATA_LEN,
		.type	  = E820_TYPE_ACPI_RECLAIM
	},
	{	/* ACPI NVS */
		.baseaddr = VIRT_ACPI_NVS_ADDR,	/* consecutive after ACPI Reclaim */
		.length   = VIRT_ACPI_NVS_LEN,
		.type	  = E820_TYPE_ACPI_NVS
	},
	{	/* 32bit PCI hole */
		.baseaddr = 0x80000000UL,	/* 2048MB */
		.length   = MEM_2G,
		.type     = E820_TYPE_RESERVED
	},
};
214 
/* Sum the sizes (in bytes) of 'num' host physical memory regions. */
static inline uint64_t calculate_memory_size(struct vm_hpa_regions *regions, uint64_t num)
{
	uint64_t total = 0UL;
	uint64_t idx;

	for (idx = 0UL; idx < num; idx++) {
		total += regions[idx].size_hpa;
	}

	return total;
}
226 
227 /**
228  * @pre entry != NULL
229  */
add_ram_entry(struct e820_entry * entry,uint64_t gpa,uint64_t length)230 static inline uint64_t add_ram_entry(struct e820_entry *entry, uint64_t gpa, uint64_t length)
231 {
232 	entry->baseaddr = gpa;
233 	entry->length = length;
234 	entry->type = E820_TYPE_RAM;
235 	return round_pde_up(entry->baseaddr + entry->length);
236 }
237 
/**
 * @pre vm != NULL
 *
 * ve820 layout for pre-launched VM:
 *
 *   entry0: usable under 1MB
 *   entry1: reserved for MP Table/ACPI RSDP from 0xf0000 to 0xfffff
 *   entry2: hpa1_low
 *   entry3: reserved, Software SRAM segment, from 0x7f5fb000 to 0x7fdfb000(8M)
 *           this address is also hard-coded in offline tool to generate guest's RTCT/PTCT table
 *   entry4: gpu_opregion (0x5000)
 *   entry5: ACPI Reclaim from 0x7fe00000 to 0x7fefffff (1M)
 *   entry6: ACPI NVS from 0x7ff00000 to 0x7fffffff (1M)
 *            Currently this is used by:
 *            a) first 64k reserved
 *            if CONFIG_SECURITY_VM_FIXUP enabled,
 *              b) TPM2 event log region (if platform supports TPM2 eventlog) from 0x7ffb0000 to 0x7fffffff
 *              c) SMBIOS table in between 64k and 0xb0000
 *   entry7: reserved for 32bit PCI hole from 0x80000000 to 0xffffffff
 *   (entry8): usable for
 *            a) hpa1_hi, if hpa1 > 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE
 *            b) hpa2, if (hpa1 + hpa2) < 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE
 *            c) hpa2_lo,
 *               if hpa1 < 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE and (hpa1 + hpa2) > 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE
 *   (entry9): usable for
 *            a) hpa2, if hpa1 > 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE
 *            b) hpa2_hi,
 *               if hpa1 < 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE and (hpa1 + hpa2) > 2GB - PRE_RTVM_SW_SRAM_MAX_SIZE
 */

/*
	The actual memory mapping under 2G looks like below:
	|<--1M-->|
	|<-----hpa1_low--->|
	|<---Non-mapped hole (if there is)-->|
	|<---Software SRAM--->|
	|<-----gpu_opregion--->|
	|<---(1M + 1M) ACPI NVS/DATA--->|
*/
void create_prelaunched_vm_e820(struct acrn_vm *vm)
{
	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
	uint64_t gpa_start = 0x100000000UL;	/* high RAM entries start at 4G */
	uint64_t gpa_hi_size;
	/* Capacity of the low window: 2G minus the reserved SW SRAM and GPU
	 * OpRegion carve-outs */
	uint64_t lowmem_max_length = MEM_2G - PRE_RTVM_SW_SRAM_MAX_SIZE - GPU_OPREGION_SIZE;
	uint32_t entry_idx = ENTRY_GPA_HI;
	uint64_t memory_size = calculate_memory_size(vm_config->memory.host_regions, vm_config->memory.region_num);

	/* NOTE(review): vm->vm_id is used to index pre_vm_e820[PRE_VM_NUM] —
	 * presumably pre-launched VM ids are always < PRE_VM_NUM; verify in config. */
	vm->e820_entries = pre_vm_e820[vm->vm_id];
	(void)memcpy_s((void *)vm->e820_entries,  E820_MAX_ENTRIES * sizeof(struct e820_entry),
		(const void *)pre_ve820_template, E820_MAX_ENTRIES * sizeof(struct e820_entry));

	if (memory_size > lowmem_max_length) {
		/* Memory spills over the low window: append an entry above 4G */
		gpa_hi_size = memory_size - lowmem_max_length;
		add_ram_entry((vm->e820_entries + entry_idx), gpa_start, gpa_hi_size);
		entry_idx++;
	} else {
		/* need to revise length of hpa1 entry to its actual size, excluding size of used space */
		/* NOTE(review): assumes memory_size > MEM_1M + VIRT_ACPI_DATA_LEN
		 * + VIRT_ACPI_NVS_LEN, otherwise this subtraction underflows —
		 * presumably guaranteed by config validation. TODO confirm. */
		vm->e820_entries[ENTRY_GPA_L].length = memory_size - MEM_1M - VIRT_ACPI_DATA_LEN - VIRT_ACPI_NVS_LEN;
	}

	vm->e820_entry_num = entry_idx;
	sort_vm_e820(vm);
}
302