/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef VM_H_
#define VM_H_

/* Defines for VM Launch and Resume */
#define VM_RESUME		0
#define VM_LAUNCH		1

#ifndef ASSEMBLER

#include <asm/lib/bits.h>
#include <asm/lib/spinlock.h>
#include <asm/pgtable.h>
#include <asm/guest/vcpu.h>
#include <vioapic.h>
#include <vpic.h>
#include <asm/guest/vmx_io.h>
#include <vuart.h>
#include <vrtc.h>
#include <asm/guest/trusty.h>
#include <asm/guest/vcpuid.h>
#include <vpci.h>
#include <asm/cpu_caps.h>
#include <asm/e820.h>
#include <asm/vm_config.h>
#include <io_req.h>
#ifdef CONFIG_HYPERV_ENABLED
#include <asm/guest/hyperv.h>
#endif

enum reset_mode {
	POWER_ON_RESET,		/* reset by hardware Power-on */
	COLD_RESET,		/* hardware cold reset */
	WARM_RESET,		/* behaves slightly differently from cold reset, in that some MSRs might be retained */
	INIT_RESET,		/* reset by INIT */
	SOFTWARE_RESET,		/* reset by software disable<->enable */
	RESUME_FROM_S3,		/* reset core states after resuming from S3 */
};

struct vm_hw_info {
	/* vcpu array of this VM */
	struct acrn_vcpu vcpu_array[MAX_VCPUS_PER_VM];
	uint16_t created_vcpus;	/* Number of created vcpus */
	uint64_t cpu_affinity;	/* Actual pCPUs this VM runs on. The set bits represent the pCPU IDs */
} __aligned(PAGE_SIZE);

struct sw_module_info {
	/* sw modules like ramdisk, bootargs, firmware, etc. */
	void *src_addr;			/* HVA */
	void *load_addr;		/* GPA */
	uint32_t size;
};

struct sw_kernel_info {
	void *kernel_src_addr;		/* HVA */
	void *kernel_entry_addr;	/* GPA */
	uint32_t kernel_size;
};

struct vm_sw_info {
	enum os_kernel_type kernel_type;	/* Guest kernel type */
	/* Kernel information (common for all guest types) */
	struct sw_kernel_info kernel_info;
	struct sw_module_info bootargs_info;
	struct sw_module_info ramdisk_info;
	struct sw_module_info acpi_info;
	/* HVA to IO shared page */
	void *io_shared_page;
	void *asyncio_sbuf;
	void *vm_event_sbuf;
	/* Whether IO completion polling mode is enabled */
	bool is_polling_ioreq;
};

struct vm_pm_info {
	uint8_t			px_cnt;		/* count of all Px states */
	struct acrn_pstate_data	px_data[MAX_PSTATE];
	uint8_t			cx_cnt;		/* count of all Cx entries */
	struct acrn_cstate_data	cx_data[MAX_CSTATE];
	struct pm_s_state_data	*sx_state_data;	/* data for S3/S5 implementation */
};

/* Enumerated type for VM states */
enum vm_state {
	VM_POWERED_OFF = 0,   /* MUST be 0 because vm_state initialization relies on the cleared BSS section */
	VM_CREATED,	/* VM created / awaiting start (boot) */
	VM_RUNNING,	/* VM running */
	VM_READY_TO_POWEROFF,     /* RTVM only: the VM is trying to power itself off */
	VM_PAUSED,	/* VM paused */
};

enum vm_vlapic_mode {
	VM_VLAPIC_DISABLED = 0U,
	VM_VLAPIC_XAPIC,
	VM_VLAPIC_X2APIC,
	VM_VLAPIC_TRANSITION
};

struct vm_arch {
	/* I/O bitmaps A and B for this VM, MUST be 4-Kbyte aligned */
	uint8_t io_bitmap[PAGE_SIZE*2];

	/* EPT hierarchy for Normal World */
	void *nworld_eptp;
	/* EPT hierarchy for Secure World
	 * Secure World can access Normal World's memory,
	 * but Normal World cannot access Secure World's memory.
	 */
	void *sworld_eptp;
	struct pgtable ept_pgtable;

	struct acrn_vioapics vioapics;	/* Virtual IOAPIC/s */
	struct acrn_vpic vpic;      /* Virtual PIC */
#ifdef CONFIG_HYPERV_ENABLED
	struct acrn_hyperv hyperv;
#endif
	enum vm_vlapic_mode vlapic_mode; /* Represents vLAPIC mode across vCPUs */

	/*
	 * Keylocker spec 4.5:
	 * Bit 0 - Backup/restore valid.
	 * Bit 1 - Reserved.
	 * Bit 2 - Backup key storage read/write error.
	 * Bit 3 - IWKeyBackup consumed.
	 * Bit 63:4 - Reserved.
	 */
	uint64_t iwkey_backup_status;
	spinlock_t iwkey_backup_lock;	/* Spin-lock used to protect internal key backup/restore */
	struct iwkey iwkey_backup;

} __aligned(PAGE_SIZE);
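
/*
 * Illustrative sketch (not part of the ACRN sources): testing the
 * iwkey_backup_status bits documented above under iwkey_backup_lock,
 * assuming the bit positions from Keylocker spec 4.5.
 *
 *	spinlock_obtain(&vm->arch_vm.iwkey_backup_lock);
 *	if ((vm->arch_vm.iwkey_backup_status & (1UL << 0U)) != 0UL) {
 *		// Bit 0: backup/restore valid
 *	}
 *	if ((vm->arch_vm.iwkey_backup_status & (1UL << 2U)) != 0UL) {
 *		// Bit 2: backup key storage read/write error
 *	}
 *	spinlock_release(&vm->arch_vm.iwkey_backup_lock);
 */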

struct acrn_vm {
	struct vm_arch arch_vm; /* Reference to this VM's arch information */
	struct vm_hw_info hw;	/* Reference to this VM's HW information */
	struct vm_sw_info sw;	/* Reference to SW associated with this VM */
	struct vm_pm_info pm;	/* Reference to this VM's power management information */
	uint32_t e820_entry_num;
	struct e820_entry *e820_entries;
	uint16_t vm_id;		    /* Virtual machine identifier */
	enum vm_state state;	/* VM state */
	struct acrn_vuart vuart[MAX_VUART_NUM_PER_VM];		/* Virtual UART */
	struct asyncio_desc	aio_desc[ACRN_ASYNCIO_MAX];
	struct list_head aiodesc_queue;
	spinlock_t asyncio_lock; /* Spin-lock used to protect asyncio add/remove for a VM */
	spinlock_t vm_event_lock;

	enum vpic_wire_mode wire_mode;
	struct iommu_domain *iommu;	/* iommu domain of this VM */
	/* vm_state_lock is used to protect vm/vcpu state transitions;
	 * its initialization relies on the cleared BSS section
	 */
	spinlock_t vm_state_lock;
	spinlock_t wbinvd_lock;		/* Spin-lock used to serialize wbinvd emulation */
	spinlock_t vlapic_mode_lock;	/* Spin-lock used to protect vlapic_mode modifications for a VM */
	spinlock_t ept_lock;	/* Spin-lock used to protect ept add/modify/remove for a VM */
	spinlock_t emul_mmio_lock;	/* Used to protect emulation mmio_node concurrent access for a VM */
	uint16_t nr_emul_mmio_regions;	/* number of emulated MMIO regions */
	struct mem_io_node emul_mmio[CONFIG_MAX_EMULATED_MMIO_REGIONS];

	struct vm_io_handler_desc emul_pio[EMUL_PIO_IDX_MAX];

	char name[MAX_VM_NAME_LEN];
	struct secure_world_control sworld_control;

	/* Secure World's snapshot
	 * Currently, Secure World only runs on vcpu[0], so the snapshot
	 * only stores vcpu0's Secure World run_context.
	 */
	struct guest_cpu_context sworld_snapshot;

	uint32_t vcpuid_entry_nr, vcpuid_level, vcpuid_xlevel;
	struct vcpuid_entry vcpuid_entries[MAX_VM_VCPUID_ENTRIES];
	struct acrn_vpci vpci;
	struct acrn_vrtc vrtc;

	uint64_t intr_inject_delay_delta; /* delay of intr injection */
	uint32_t reset_control;
} __aligned(PAGE_SIZE);

/*
 * @pre vm != NULL
 */
static inline uint64_t vm_active_cpus(const struct acrn_vm *vm)
{
	uint64_t dmask = 0UL;
	uint16_t i;
	const struct acrn_vcpu *vcpu;

	foreach_vcpu(i, vm, vcpu) {
		bitmap_set_nolock(vcpu->vcpu_id, &dmask);
	}

	return dmask;
}
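
/*
 * Illustrative usage (not from the ACRN sources): the returned bitmap is
 * indexed by vCPU id, so a caller could iterate the created vCPUs like:
 *
 *	uint64_t dmask = vm_active_cpus(vm);
 *	uint16_t vcpu_id;
 *
 *	for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
 *		if ((dmask & (1UL << vcpu_id)) != 0UL) {
 *			// act on vcpu_from_vid(vm, vcpu_id)
 *		}
 *	}
 */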

/*
 * @pre vcpu_id < MAX_VCPUS_PER_VM
 * @pre vm->hw.vcpu_array[vcpu_id].state != VCPU_OFFLINE
 */
static inline struct acrn_vcpu *vcpu_from_vid(struct acrn_vm *vm, uint16_t vcpu_id)
{
	return &(vm->hw.vcpu_array[vcpu_id]);
}

static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_id)
{
	uint16_t i;
	struct acrn_vcpu *vcpu, *target_vcpu = NULL;

	foreach_vcpu(i, vm, vcpu) {
		if (pcpuid_from_vcpu(vcpu) == pcpu_id) {
			target_vcpu = vcpu;
			break;
		}
	}

	return target_vcpu;
}
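
/*
 * Illustrative usage (not from the ACRN sources): unlike vcpu_from_vid(),
 * vcpu_from_pid() may return NULL when no vCPU of the given VM runs on
 * that pCPU, so callers are expected to check the result:
 *
 *	struct acrn_vcpu *vcpu = vcpu_from_pid(vm, pcpu_id);
 *	if (vcpu != NULL) {
 *		// safe to dereference vcpu here
 *	}
 */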

/* Convert relative vm id to absolute vm id */
static inline uint16_t rel_vmid_2_vmid(uint16_t service_vmid, uint16_t rel_vmid)
{
	return (service_vmid + rel_vmid);
}

/* Convert absolute vm id to relative vm id */
static inline uint16_t vmid_2_rel_vmid(uint16_t service_vmid, uint16_t vmid)
{
	return (vmid - service_vmid);
}
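
/*
 * Worked example (illustrative): with a Service VM id of 1U,
 * rel_vmid_2_vmid(1U, 2U) yields absolute vm id 3U, and
 * vmid_2_rel_vmid(1U, 3U) maps it back to relative vm id 2U.
 */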

/*
 * Return true when the severity of the target VM does not exceed
 * SEVERITY_SERVICE_VM.
 */
static inline bool is_severity_pass(uint16_t target_vmid)
{
	return SEVERITY_SERVICE_VM >= get_vm_severity(target_vmid);
}

void make_shutdown_vm_request(uint16_t pcpu_id);
bool need_shutdown_vm(uint16_t pcpu_id);
int32_t shutdown_vm(struct acrn_vm *vm);
void poweroff_if_rt_vm(struct acrn_vm *vm);
void pause_vm(struct acrn_vm *vm);
void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec);
void start_vm(struct acrn_vm *vm);
int32_t reset_vm(struct acrn_vm *vm, enum reset_mode mode);
int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm);
int32_t prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config);
void launch_vms(uint16_t pcpu_id);
bool is_poweroff_vm(const struct acrn_vm *vm);
bool is_created_vm(const struct acrn_vm *vm);
bool is_paused_vm(const struct acrn_vm *vm);
bool is_service_vm(const struct acrn_vm *vm);
bool is_postlaunched_vm(const struct acrn_vm *vm);
bool is_prelaunched_vm(const struct acrn_vm *vm);
uint16_t get_vmid_by_name(const char *name);
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
struct acrn_vm *get_service_vm(void);

void create_service_vm_e820(struct acrn_vm *vm);
void create_prelaunched_vm_e820(struct acrn_vm *vm);
void prepare_vm_identical_memmap(struct acrn_vm *vm, uint16_t e820_entry_type, uint64_t prot_orig);
uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_addr, uint64_t max_addr);

int32_t prepare_os_image(struct acrn_vm *vm);

void suspend_vrtc(void);
void resume_vrtc(void);
void vrtc_init(struct acrn_vm *vm);

bool is_lapic_pt_configured(const struct acrn_vm *vm);
bool is_pmu_pt_configured(const struct acrn_vm *vm);
bool is_rt_vm(const struct acrn_vm *vm);
bool is_stateful_vm(const struct acrn_vm *vm);
bool is_nvmx_configured(const struct acrn_vm *vm);
bool is_vcat_configured(const struct acrn_vm *vm);
bool is_static_configured_vm(const struct acrn_vm *vm);
uint16_t get_unused_vmid(void);
bool is_pi_capable(const struct acrn_vm *vm);
bool has_rt_vm(void);
struct acrn_vm *get_highest_severity_vm(bool runtime);
bool vm_hide_mtrr(const struct acrn_vm *vm);
void update_vm_vlapic_state(struct acrn_vm *vm);
enum vm_vlapic_mode check_vm_vlapic_mode(const struct acrn_vm *vm);
bool is_vhwp_configured(const struct acrn_vm *vm);
bool is_vtm_configured(const struct acrn_vm *vm);
/*
 * @pre vm != NULL
 */
void get_vm_lock(struct acrn_vm *vm);

/*
 * @pre vm != NULL
 */
void put_vm_lock(struct acrn_vm *vm);
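
/*
 * Illustrative pairing (not from the ACRN sources): vm/vcpu state
 * transitions are expected to happen with the VM lock held, e.g.
 *
 *	get_vm_lock(vm);
 *	if (is_paused_vm(vm)) {
 *		// ... perform the state transition ...
 *	}
 *	put_vm_lock(vm);
 */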

void *get_sworld_memory_base(void);
#endif /* !ASSEMBLER */

#endif /* VM_H_ */