/*
 * Copyright (C) 2021-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef VEPT_H
#define VEPT_H

#ifdef CONFIG_NVMX_ENABLED

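/*
 * RESERVED_BITS(start, end) builds a mask with bits [start, end] set
 * (inclusive), e.g. RESERVED_BITS(3U, 7U) expands to 0xF8UL.
 * The IA32E_*_RESERVED_BITS masks below combine the reserved bits of a
 * guest EPT paging-structure entry at each level with the bits at and
 * above the platform physical address width ('phy_addr_width'); an
 * entry with any of these bits set is malformed.
 */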
#define RESERVED_BITS(start, end) (((1UL << ((end) - (start) + 1UL)) - 1UL) << (start))
#define IA32E_PML4E_RESERVED_BITS(phy_addr_width)	(RESERVED_BITS(3U, 7U) | RESERVED_BITS(phy_addr_width, 51U))
#define IA32E_PDPTE_RESERVED_BITS(phy_addr_width)	(RESERVED_BITS(3U, 6U) | RESERVED_BITS(phy_addr_width, 51U))
#define IA32E_PDPTE_LEAF_RESERVED_BITS(phy_addr_width)	(RESERVED_BITS(12U, 29U) | RESERVED_BITS(phy_addr_width, 51U))
#define IA32E_PDE_RESERVED_BITS(phy_addr_width)		(RESERVED_BITS(3U, 6U) | RESERVED_BITS(phy_addr_width, 51U))
#define IA32E_PDE_LEAF_RESERVED_BITS(phy_addr_width)	(RESERVED_BITS(12U, 20U) | RESERVED_BITS(phy_addr_width, 51U))
#define IA32E_PTE_RESERVED_BITS(phy_addr_width)		(RESERVED_BITS(phy_addr_width, 51U))

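/*
 * Index helpers for walking a 4-level paging structure: given a paging
 * level 'lvl', PAGING_ENTRY_SHIFT() is the bit position where that
 * level's index starts in an address, and PAGING_ENTRY_OFFSET() pulls
 * out the corresponding 9-bit index. Assuming IA32E_PT is the leaf
 * level and PTE_SHIFT is 12U (both defined in the pgtable headers),
 * PAGING_ENTRY_SHIFT(IA32E_PML4) is 39U, so PAGING_ENTRY_OFFSET(addr,
 * IA32E_PML4) yields bits 47:39 of 'addr'.
 */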
#define PAGING_ENTRY_SHIFT(lvl)		((IA32E_PT - (lvl)) * 9U + PTE_SHIFT)
#define PAGING_ENTRY_OFFSET(addr, lvl)	(((addr) >> PAGING_ENTRY_SHIFT(lvl)) & (PTRS_PER_PTE - 1UL))

/*
 * A descriptor to store the info of a nested EPT.
 */
struct vept_desc {
	/*
	 * A guest EPTP configured by the L1 VM.
	 * The format is the same as the 'EPT pointer' field in the VMCS.
	 * Its PML4 address field is a GPA of the L1 VM.
	 */
	uint64_t guest_eptp;
	/*
	 * A shadow EPTP.
	 * The format is the same as the 'EPT pointer' field in the VMCS.
	 * Its PML4 address field is an HVA of the hypervisor.
	 */
	uint64_t shadow_eptp;
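	/*
	 * Number of active users of this descriptor, taken and dropped via
	 * get_vept_desc()/put_vept_desc(); the shadow EPT can be reclaimed
	 * once the count falls back to zero.
	 */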
	uint32_t ref_count;
};

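/* Initialize the vEPT (shadow EPT) module. */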
void init_vept(void);
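/*
 * Look up the shadow EPTP that shadows 'guest_eptp'; 0UL is returned
 * when no shadow EPT has been set up for it.
 */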
uint64_t get_shadow_eptp(uint64_t guest_eptp);
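/*
 * Get the descriptor tracking 'guest_eptp', setting up a new shadow
 * EPT root for it on first use, and take a reference on it.
 */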
struct vept_desc *get_vept_desc(uint64_t guest_eptp);
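/* Drop the reference on the descriptor of 'guest_eptp' taken by get_vept_desc(). */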
void put_vept_desc(uint64_t guest_eptp);
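/*
 * Handle an EPT violation that occurred while the L2 guest was running
 * by populating the shadow EPT from the guest EPT; returns true when
 * handled here, false when it has to be reflected to the L1 hypervisor.
 */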
bool handle_l2_ept_violation(struct acrn_vcpu *vcpu);
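/*
 * VM-exit handler for the INVEPT instruction executed by the L1
 * hypervisor, which invalidates the corresponding shadow EPT mappings.
 */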
int32_t invept_vmexit_handler(struct acrn_vcpu *vcpu);
#else
static inline void init_vept(void) {}
#endif /* CONFIG_NVMX_ENABLED */
#endif /* VEPT_H */