/*
 * Copyright (C) 2021-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <logmsg.h>
#include <asm/mmu.h>
#include <asm/guest/vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vmexit.h>
#include <asm/guest/ept.h>
#include <asm/guest/vept.h>
#include <asm/guest/nested.h>

#define VETP_LOG_LEVEL			LOG_DEBUG
#define CONFIG_MAX_GUEST_EPT_NUM	(MAX_ACTIVE_VVMCS_NUM * MAX_VCPUS_PER_VM)
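/*
 * One vept_desc may be needed per active vVMCS on each vCPU of the VM, which
 * is what bounds the size of the bucket below.
 */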
static struct vept_desc vept_desc_bucket[CONFIG_MAX_GUEST_EPT_NUM];
static spinlock_t vept_desc_bucket_lock;

/*
 * For simplicity, the total platform RAM size is used to calculate the
 * memory needed for shadow page tables. This is not an accurate upper bound,
 * but it satisfies typical use cases where there is not a lot of
 * overcommitment and sharing of memory between L2 VMs.
 *
 * Each page table entry needs 8 bytes to represent a 4K page frame.
 * Total number of bytes = (get_e820_ram_size() / PAGE_SIZE) * 8
 * Number of pages needed = total number of bytes / PAGE_SIZE
 */
static uint64_t calc_sept_size(void)
{
	return (get_e820_ram_size() * 8UL) / PAGE_SIZE;
}

static uint64_t calc_sept_page_num(void)
{
	return calc_sept_size() / PAGE_SIZE;
}
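
/*
 * Worked example: with 8 GiB of platform RAM, there are 2M 4K frames; at
 * 8 bytes per entry that is 16 MiB of shadow page tables, i.e. 4096 pages
 * reserved in sept_page_pool.
 */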

static struct page_pool sept_page_pool;
static struct page *sept_pages;
static uint64_t *sept_page_bitmap;

/*
 * @brief Reserve space for SEPT pages from the platform E820 table.
 *	  At the moment, nested VMX is only supported for the Service VM.
 */
static void init_vept_pool(void)
{
	uint64_t page_base;

	page_base = e820_alloc_memory(calc_sept_size(), MEM_SIZE_MAX);

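	/*
	 * The reserved pages are hypervisor-private; map them supervisor-only
	 * in the hypervisor's own page tables.
	 */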
	set_paging_supervisor(page_base, calc_sept_size());

	sept_pages = (struct page *)page_base;
	sept_page_bitmap = (uint64_t *)e820_alloc_memory((calc_sept_page_num() / 64U), MEM_SIZE_MAX);
}

static bool is_present_ept_entry(uint64_t ept_entry)
{
	return ((ept_entry & EPT_RWX) != 0U);
}

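/*
 * An EPT entry is a leaf when it maps a large page (PSE bit set) or when it
 * sits at the 4K PTE level.
 */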
static bool is_leaf_ept_entry(uint64_t ept_entry, enum _page_table_level pt_level)
{
	return (((ept_entry & PAGE_PSE) != 0U) || (pt_level == IA32E_PT));
}

/*
 * @brief Release all pages except the PML4E page of a shadow EPT
 */
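/*
 * Leaf entries are skipped on purpose: they map guest memory frames rather
 * than pages allocated from sept_page_pool, so only the non-leaf child page
 * tables are returned to the pool.
 */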
static void free_sept_table(uint64_t *shadow_eptp)
{
	uint64_t *shadow_pml4e, *shadow_pdpte, *shadow_pde;
	uint64_t i, j, k;

	if (shadow_eptp) {
		for (i = 0UL; i < PTRS_PER_PML4E; i++) {
			shadow_pml4e = pml4e_offset(shadow_eptp, i << PML4E_SHIFT);
			if (!is_present_ept_entry(*shadow_pml4e)) {
				continue;
			}
			for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
				shadow_pdpte = pdpte_offset(shadow_pml4e, j << PDPTE_SHIFT);
				if (!is_present_ept_entry(*shadow_pdpte) ||
				    is_leaf_ept_entry(*shadow_pdpte, IA32E_PDPT)) {
					continue;
				}
				for (k = 0UL; k < PTRS_PER_PDE; k++) {
					shadow_pde = pde_offset(shadow_pdpte, k << PDE_SHIFT);
					if (!is_present_ept_entry(*shadow_pde) ||
					    is_leaf_ept_entry(*shadow_pde, IA32E_PD)) {
						continue;
					}
					free_page(&sept_page_pool, (struct page *)((*shadow_pde) & EPT_ENTRY_PFN_MASK));
				}
				free_page(&sept_page_pool, (struct page *)((*shadow_pdpte) & EPT_ENTRY_PFN_MASK));
			}
			free_page(&sept_page_pool, (struct page *)((*shadow_pml4e) & EPT_ENTRY_PFN_MASK));
			*shadow_pml4e = 0UL;
		}
	}
}

/*
 * @brief Convert a guest EPTP to the associated vept_desc.
 * @return struct vept_desc * if it exists.
 * @return NULL otherwise.
 */
static struct vept_desc *find_vept_desc(uint64_t guest_eptp)
{
	uint32_t i;
	struct vept_desc *desc = NULL;

	if (guest_eptp) {
		spinlock_obtain(&vept_desc_bucket_lock);
		for (i = 0L; i < CONFIG_MAX_GUEST_EPT_NUM; i++) {
			/* Find an existing vept_desc of the guest EPTP */
			if (vept_desc_bucket[i].guest_eptp == guest_eptp) {
				desc = &vept_desc_bucket[i];
				break;
			}
		}
		spinlock_release(&vept_desc_bucket_lock);
	}

	return desc;
}

/*
 * @brief Convert a guest EPTP to a shadow EPTP.
 * @return 0 if no shadow EPTP exists for the guest EPTP.
 */
uint64_t get_shadow_eptp(uint64_t guest_eptp)
{
	struct vept_desc *desc = NULL;

	desc = find_vept_desc(guest_eptp);
	return (desc != NULL) ? hva2hpa((void *)desc->shadow_eptp) : 0UL;
}

/*
 * @brief Get a vept_desc to cache a guest EPTP
 *
 * If there is already a vept_desc associated with the given guest_eptp,
 * increase its ref_count and return it. If no vept_desc exists for the
 * guest_eptp, allocate one and initialize it.
 *
 * @return a vept_desc which associates the guest EPTP with a shadow EPTP
 */
struct vept_desc *get_vept_desc(uint64_t guest_eptp)
{
	uint32_t i;
	struct vept_desc *desc = NULL;

	if (guest_eptp != 0UL) {
		spinlock_obtain(&vept_desc_bucket_lock);
		for (i = 0L; i < CONFIG_MAX_GUEST_EPT_NUM; i++) {
			/* Find an existing vept_desc of the guest EPTP address bits */
			if (vept_desc_bucket[i].guest_eptp == guest_eptp) {
				desc = &vept_desc_bucket[i];
				desc->ref_count++;
				break;
			}
			/* Remember the first free vept_desc in case no match is found */
			if (!desc && (vept_desc_bucket[i].ref_count == 0UL)) {
				desc = &vept_desc_bucket[i];
			}
		}
		ASSERT(desc != NULL, "Get vept_desc failed!");

		/* A new vept_desc, initialize it */
		if (desc->shadow_eptp == 0UL) {
			desc->shadow_eptp = (uint64_t)alloc_page(&sept_page_pool) | (guest_eptp & ~PAGE_MASK);
			desc->guest_eptp = guest_eptp;
			desc->ref_count = 1UL;

			dev_dbg(VETP_LOG_LEVEL, "[%s], vept_desc[%llx] ref[%d] shadow_eptp[%llx] guest_eptp[%llx]",
					__func__, desc, desc->ref_count, desc->shadow_eptp, desc->guest_eptp);
		}

		spinlock_release(&vept_desc_bucket_lock);
	}

	return desc;
}

/*
 * @brief Put a vept_desc associated with a guest_eptp
 *
 * If the ref_count of the vept_desc drops to 0, release all resources used by it.
 */
void put_vept_desc(uint64_t guest_eptp)
{
	struct vept_desc *desc = NULL;

	if (guest_eptp != 0UL) {
		desc = find_vept_desc(guest_eptp);
		spinlock_obtain(&vept_desc_bucket_lock);
		if (desc) {
			desc->ref_count--;
			if (desc->ref_count == 0UL) {
				dev_dbg(VETP_LOG_LEVEL, "[%s], vept_desc[%llx] ref[%d] shadow_eptp[%llx] guest_eptp[%llx]",
						__func__, desc, desc->ref_count, desc->shadow_eptp, desc->guest_eptp);
				free_sept_table((void *)(desc->shadow_eptp & PAGE_MASK));
				free_page(&sept_page_pool, (struct page *)(desc->shadow_eptp & PAGE_MASK));
				/* Flush the hardware TLB */
				invept((void *)(desc->shadow_eptp & PAGE_MASK));
				desc->shadow_eptp = 0UL;
				desc->guest_eptp = 0UL;
			}
		}
		spinlock_release(&vept_desc_bucket_lock);
	}
}

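/*
 * @brief Walk an EPT, starting from 'eptp', to find the present leaf entry
 *	  that maps 'gpa'.
 *
 * On success, '*level' is set to the paging level of the leaf entry.
 * @return the leaf EPT entry, or 0 if the GPA is not mapped by a present entry.
 */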
static uint64_t get_leaf_entry(uint64_t gpa, uint64_t *eptp, enum _page_table_level *level)
{
	enum _page_table_level pt_level = IA32E_PML4;
	uint16_t offset;
	uint64_t ept_entry = 0UL;
	uint64_t *p_ept_entry = eptp;

	while (pt_level <= IA32E_PT) {
		offset = PAGING_ENTRY_OFFSET(gpa, pt_level);
		ept_entry = p_ept_entry[offset];

		if (is_present_ept_entry(ept_entry)) {
			if (is_leaf_ept_entry(ept_entry, pt_level)) {
				*level = pt_level;
				break;
			}
		} else {
			ept_entry = 0UL;
			pr_err("%s, GPA[%llx] is invalid!", __func__, gpa);
			break;
		}

		p_ept_entry = (uint64_t *)(ept_entry & EPT_ENTRY_PFN_MASK);
		pt_level += 1;
	}

	return ept_entry;
}

/**
 * @brief Shadow a guest EPT entry
 * @pre vcpu != NULL
 */
static uint64_t generate_shadow_ept_entry(struct acrn_vcpu *vcpu, uint64_t guest_ept_entry,
				    enum _page_table_level guest_ept_level)
{
	uint64_t shadow_ept_entry = 0UL;
	uint64_t ept_entry;
	enum _page_table_level ept_level;

	/*
	 * Create a shadow EPT entry.
	 * Only 4K pages are supported for guest EPT, so creating a shadow EPT entry
	 * is simple. The rules are:
	 *   > Find the host EPT leaf entry of the address in guest_ept_entry[M-1:12], named ept_entry.
	 *   > Minimize the attribute bits (according to ept_entry and guest_ept_entry) and
	 *     set them in the shadow EPT entry shadow_ept_entry.
	 *   > Set the HPA of guest_ept_entry[M-1:12] in shadow_ept_entry.
	 */
	if (is_leaf_ept_entry(guest_ept_entry, guest_ept_level)) {
		ASSERT(guest_ept_level == IA32E_PT, "Only support 4K page for guest EPT!");
		ept_entry = get_leaf_entry((guest_ept_entry & EPT_ENTRY_PFN_MASK), get_eptp(vcpu->vm), &ept_level);
		if (ept_entry != 0UL) {
			/*
			 * TODO:
			 * For now, take the guest EPT entry attributes directly. Memory type,
			 * permission bits and reserved bits need to be taken care of when the
			 * host EPT entry and the guest EPT entry are merged.
			 *
			 * Just keep the code skeleton here for future extension.
			 */
			shadow_ept_entry = guest_ept_entry & ~EPT_ENTRY_PFN_MASK;

			/*
			 * Set the address.
			 * gpa2hpa() should succeed as ept_entry has already been found.
			 */
			shadow_ept_entry |= gpa2hpa(vcpu->vm, (guest_ept_entry & EPT_ENTRY_PFN_MASK));
		}
	} else {
		/* Use the HPA of a newly allocated page in the shadow EPT entry */
		shadow_ept_entry = guest_ept_entry & ~EPT_ENTRY_PFN_MASK;
		shadow_ept_entry |= hva2hpa((void *)alloc_page(&sept_page_pool)) & EPT_ENTRY_PFN_MASK;
	}

	return shadow_ept_entry;
}

/*
 * @brief Check misconfigurations on EPT entries
 *
 * SDM 28.2.3.1
 */
static bool is_ept_entry_misconfig(uint64_t ept_entry, enum _page_table_level pt_level)
{
	struct cpuinfo_x86 *cpu_info = get_pcpu_info();
	uint8_t max_phy_addr_bits = cpu_info->phys_bits;
	bool is_misconfig = false;
	uint64_t reserved_bits = 0UL;
	uint8_t memory_type;

	/* Write w/o Read, misconfigured */
	is_misconfig = ((ept_entry & (EPT_RD | EPT_WR)) == EPT_WR);

	/* Execute-only is not supported */
	if (!pcpu_has_vmx_ept_vpid_cap(VMX_EPT_EXECUTE_ONLY)) {
		/* Execute w/o Read, misconfigured */
		is_misconfig = is_misconfig || ((ept_entry & (EPT_RD | EPT_EXE)) == EPT_EXE);
		/*
		 * TODO: With 'mode-based execute control for EPT' set,
		 * User-execute w/o Read, misconfigured
		 *	is_misconfig = is_misconfig || ((epte & (EPT_RD | EPT_XU)) == EPT_XU);
		 */
	}

	/* Reserved bits should be 0, else misconfigured */
	switch (pt_level) {
	case IA32E_PML4:
		reserved_bits = IA32E_PML4E_RESERVED_BITS(max_phy_addr_bits);
		break;
	case IA32E_PDPT:
		if (ept_entry & PAGE_PSE) {
			reserved_bits = IA32E_PDPTE_LEAF_RESERVED_BITS(max_phy_addr_bits);
		} else {
			reserved_bits = IA32E_PDPTE_RESERVED_BITS(max_phy_addr_bits);
		}
		break;
	case IA32E_PD:
		if (ept_entry & PAGE_PSE) {
			reserved_bits = IA32E_PDE_LEAF_RESERVED_BITS(max_phy_addr_bits);
		} else {
			reserved_bits = IA32E_PDE_RESERVED_BITS(max_phy_addr_bits);
		}
		break;
	case IA32E_PT:
		reserved_bits = IA32E_PTE_RESERVED_BITS(max_phy_addr_bits);
		break;
	default:
		break;
	}
	is_misconfig = is_misconfig || ((ept_entry & reserved_bits) != 0UL);

	/*
	 * SDM 28.2.6.2: The EPT memory type is specified in bits 5:3 of the last EPT
	 * paging-structure entry: 0 = UC; 1 = WC; 4 = WT; 5 = WP; and 6 = WB.
	 * Other values are reserved and cause EPT misconfiguration
	 */
	if (is_leaf_ept_entry(ept_entry, pt_level)) {
		memory_type = ept_entry & EPT_MT_MASK;
		is_misconfig = is_misconfig || ((memory_type != EPT_UNCACHED) &&
						(memory_type != EPT_WC) &&
						(memory_type != EPT_WT) &&
						(memory_type != EPT_WP) &&
						(memory_type != EPT_WB));
	}

	return is_misconfig;
}

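/*
 * @brief Check whether the access that caused the EPT violation is also denied
 *	  by the guest EPT entry's RWX permission bits.
 *
 * Exit qualification bits 2:0 report whether the faulting access was a data
 * read, a data write or an instruction fetch (see SDM, "Exit Qualification for
 * EPT Violations").
 */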
static bool is_access_violation(uint64_t ept_entry)
{
	uint64_t exit_qual = exec_vmread(VMX_EXIT_QUALIFICATION);
	bool access_violation = false;

	if (/* Caused by data read */
	    (((exit_qual & 0x1UL) != 0UL) && ((ept_entry & EPT_RD) == 0)) ||
	    /* Caused by data write */
	    (((exit_qual & 0x2UL) != 0UL) && ((ept_entry & EPT_WR) == 0)) ||
	    /* Caused by instruction fetch */
	    (((exit_qual & 0x4UL) != 0UL) && ((ept_entry & EPT_EXE) == 0))) {
		access_violation = true;
	}

	return access_violation;
}

/**
 * @brief L2 VM EPT violation handler
 * @pre vcpu != NULL
 *
 * SDM: 28.2.3 EPT-Induced VM Exits
 *
 * Walk through the guest EPT and fill in the corresponding shadow EPT entries
 */
bool handle_l2_ept_violation(struct acrn_vcpu *vcpu)
{
	uint64_t guest_eptp = vcpu->arch.nested.current_vvmcs->vmcs12.ept_pointer;
	struct vept_desc *desc = find_vept_desc(guest_eptp);
	uint64_t l2_ept_violation_gpa = exec_vmread(VMX_GUEST_PHYSICAL_ADDR_FULL);
	enum _page_table_level pt_level;
	uint64_t guest_ept_entry, shadow_ept_entry;
	uint64_t *p_guest_ept_page, *p_shadow_ept_page;
	uint16_t offset;
	bool is_l1_vmexit = true;

	ASSERT(desc != NULL, "Invalid shadow EPTP!");

	spinlock_obtain(&vept_desc_bucket_lock);
	stac();

	p_shadow_ept_page = (uint64_t *)(desc->shadow_eptp & PAGE_MASK);
	p_guest_ept_page = gpa2hva(vcpu->vm, desc->guest_eptp & PAGE_MASK);

	for (pt_level = IA32E_PML4; (p_guest_ept_page != NULL) && (pt_level <= IA32E_PT); pt_level++) {
		offset = PAGING_ENTRY_OFFSET(l2_ept_violation_gpa, pt_level);
		guest_ept_entry = p_guest_ept_page[offset];
		shadow_ept_entry = p_shadow_ept_page[offset];

		/*
		 * If the guest EPT entry does not exist, reflect the EPT violation to the L1 VM.
		 */
		if (!is_present_ept_entry(guest_ept_entry)) {
			break;
		}

		if (is_ept_entry_misconfig(guest_ept_entry, pt_level)) {
			/* Inject EPT_MISCONFIGURATION to L1 VM */
			exec_vmwrite(VMX_EXIT_REASON, VMX_EXIT_REASON_EPT_MISCONFIGURATION);
			break;
		}

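		/* The guest EPT entry itself denies the access: reflect the violation to the L1 VM. */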
		if (is_access_violation(guest_ept_entry)) {
			break;
		}

		/* The shadow EPT entry does not exist, create it */
		if (!is_present_ept_entry(shadow_ept_entry)) {
			/* Create a shadow EPT entry */
			shadow_ept_entry = generate_shadow_ept_entry(vcpu, guest_ept_entry, pt_level);
			p_shadow_ept_page[offset] = shadow_ept_entry;
			if (shadow_ept_entry == 0UL) {
				/*
				 * TODO:
				 * For an invalid GPA in guest EPT entries, reflect the violation
				 * to the L1 VM for now. Need to revisit this and evaluate whether
				 * the invalid GPA access of L2 should be emulated in the HV directly.
				 */
				break;
			}
		}

		/*
		 * SDM 28.3.3.4 Guidelines for Use of the INVEPT Instruction:
		 * Software may use the INVEPT instruction after modifying a present EPT
		 * paging-structure entry (see Section 28.2.2) to change any of the
		 * privilege bits 2:0 from 0 to 1. Failure to do so may cause an EPT
		 * violation that would not otherwise occur. Because an EPT violation
		 * invalidates any mappings that would be used by the access that caused
		 * the EPT violation (see Section 28.3.3.1), an EPT violation will not
		 * recur if the original access is performed again, even if the INVEPT
		 * instruction is not executed.
		 *
		 * If access bits of the guest EPT entry are added after the shadow EPT entry
		 * is set up, the guest VM may not execute INVEPT. Sync them here directly.
		 */
		shadow_ept_entry = (shadow_ept_entry & ~EPT_RWX) | (guest_ept_entry & EPT_RWX);
		p_shadow_ept_page[offset] = shadow_ept_entry;

		/* Shadow EPT entry exists */
		if (is_leaf_ept_entry(guest_ept_entry, pt_level)) {
			/* Shadow EPT is set up, let the L2 VM re-execute the instruction. */
			if ((exec_vmread32(VMX_IDT_VEC_INFO_FIELD) & VMX_INT_INFO_VALID) == 0U) {
				is_l1_vmexit = false;
			}
			break;
		} else {
			/* Set up next level EPT entries. */
			p_shadow_ept_page = hpa2hva(shadow_ept_entry & EPT_ENTRY_PFN_MASK);
			p_guest_ept_page = gpa2hva(vcpu->vm, guest_ept_entry & EPT_ENTRY_PFN_MASK);
		}
	}

	clac();
	spinlock_release(&vept_desc_bucket_lock);

	return is_l1_vmexit;
}

/**
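 * @brief Emulate the INVEPT instruction executed by the L1 VM
 *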
 * @pre vcpu != NULL
 */
int32_t invept_vmexit_handler(struct acrn_vcpu *vcpu)
{
	uint32_t i;
	struct vept_desc *desc;
	struct invept_desc operand_gla_ept;
	uint64_t type, ept_cap_vmsr;

	if (check_vmx_permission(vcpu)) {
		ept_cap_vmsr = vcpu_get_guest_msr(vcpu, MSR_IA32_VMX_EPT_VPID_CAP);
		type = get_invvpid_ept_operands(vcpu, (void *)&operand_gla_ept, sizeof(operand_gla_ept));
		if (gpa2hpa(vcpu->vm, operand_gla_ept.eptp) == INVALID_HPA) {
			nested_vmx_result(VMfailValid, VMXERR_INVEPT_INVVPID_INVALID_OPERAND);
		} else if (type == 1 && (ept_cap_vmsr & VMX_EPT_INVEPT_SINGLE_CONTEXT) != 0UL) {
			/* Single-context invalidation */
			/* Find the corresponding vept_desc of the invalidated EPTP */
			desc = get_vept_desc(operand_gla_ept.eptp);
			if (desc) {
				spinlock_obtain(&vept_desc_bucket_lock);
				if (desc->shadow_eptp != 0UL) {
					/*
					 * Since ACRN does not know which paging entries were changed,
					 * remove all the shadow EPT entries that ACRN created for the L2 VM.
					 */
					free_sept_table((void *)(desc->shadow_eptp & PAGE_MASK));
					invept((void *)(desc->shadow_eptp & PAGE_MASK));
				}
				spinlock_release(&vept_desc_bucket_lock);
				put_vept_desc(operand_gla_ept.eptp);
			}
			nested_vmx_result(VMsucceed, 0);
		} else if ((type == 2) && (ept_cap_vmsr & VMX_EPT_INVEPT_GLOBAL_CONTEXT) != 0UL) {
			/* Global invalidation */
			spinlock_obtain(&vept_desc_bucket_lock);
			/*
			 * Invalidate all shadow EPTPs of the L1 VM
			 * TODO: Invalidating all L2 vCPU associated EPTPs is enough. How?
			 */
			for (i = 0L; i < CONFIG_MAX_GUEST_EPT_NUM; i++) {
				if (vept_desc_bucket[i].guest_eptp != 0UL) {
					desc = &vept_desc_bucket[i];
					free_sept_table((void *)(desc->shadow_eptp & PAGE_MASK));
					invept((void *)(desc->shadow_eptp & PAGE_MASK));
				}
			}
			spinlock_release(&vept_desc_bucket_lock);
			nested_vmx_result(VMsucceed, 0);
		} else {
			nested_vmx_result(VMfailValid, VMXERR_INVEPT_INVVPID_INVALID_OPERAND);
		}
	}

	return 0;
}

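/*
 * @brief Initialize the shadow EPT page pool and the vept_desc bucket at boot
 */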
void init_vept(void)
{
	init_vept_pool();
	sept_page_pool.start_page = sept_pages;
	sept_page_pool.bitmap_size = calc_sept_page_num() / 64U;
	sept_page_pool.bitmap = sept_page_bitmap;
	sept_page_pool.dummy_page = NULL;
	spinlock_init(&sept_page_pool.lock);
	memset((void *)sept_page_pool.bitmap, 0, sept_page_pool.bitmap_size * sizeof(uint64_t));
	sept_page_pool.last_hint_id = 0UL;

	spinlock_init(&vept_desc_bucket_lock);
}