/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/**
 * @file vcpu.h
 *
 * @brief public APIs for vcpu operations
 */

#ifndef VCPU_H
#define VCPU_H


#ifndef ASSEMBLER

#include <acrn_common.h>
#include <asm/guest/guest_memory.h>
#include <asm/guest/virtual_cr.h>
#include <asm/guest/vlapic.h>
#include <asm/guest/vmtrr.h>
#include <schedule.h>
#include <event.h>
#include <io_req.h>
#include <asm/msr.h>
#include <asm/cpu.h>
#include <asm/guest/instr_emul.h>
#include <asm/guest/nested.h>
#include <asm/vmx.h>
#include <asm/vm_config.h>

/**
 * @brief vcpu
 *
 * @defgroup acrn_vcpu ACRN vcpu
 * @{
 */

/*
 * VCPU-related APIs
 */

/**
 * @defgroup virt_int_injection Event IDs supported for virtual interrupt injection
 *
 * This group includes the event IDs supported for virtual interrupt injection.
 *
 * @{
 */

/**
 * @brief Request for exception injection
 */
#define ACRN_REQUEST_EXCP 0U

/**
 * @brief Request for vLAPIC event
 */
#define ACRN_REQUEST_EVENT 1U

/**
 * @brief Request for external interrupt from vPIC
 */
#define ACRN_REQUEST_EXTINT 2U

/**
 * @brief Request for non-maskable interrupt
 */
#define ACRN_REQUEST_NMI 3U

/**
 * @brief Request for EOI exit bitmap update
 */
#define ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE 4U

/**
 * @brief Request for EPT flush
 */
#define ACRN_REQUEST_EPT_FLUSH 5U

/**
 * @brief Request for triple fault
 */
#define ACRN_REQUEST_TRP_FAULT 6U

/**
 * @brief Request for VPID TLB flush
 */
#define ACRN_REQUEST_VPID_FLUSH 7U

/**
 * @brief Request for initializing the VMCS
 */
#define ACRN_REQUEST_INIT_VMCS 8U

/**
 * @brief Request for a synchronizing wait on WBINVD
 */
#define ACRN_REQUEST_WAIT_WBINVD 9U

/**
 * @brief Request for split-lock operation
 */
#define ACRN_REQUEST_SPLIT_LOCK 10U

/**
 * @brief Request for an SMP call
 */
#define ACRN_REQUEST_SMP_CALL 11U

/**
 * @}
 */
/* End of virt_int_injection */
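
/*
 * Illustrative sketch, not a definition from this header: each event ID above
 * indexes a bit in vcpu->arch.pending_req. A request is typically raised via
 * vcpu_make_request() (declared in the virtual interrupt headers), roughly:
 *
 *	vcpu_make_request(vcpu, ACRN_REQUEST_EVENT);
 *
 * which sets the bit with an atomic bitmap operation and kicks the vCPU so
 * the request is serviced before its next VM entry.
 */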

#define save_segment(seg, SEG_NAME) \
{ \
	(seg).selector = exec_vmread16(SEG_NAME##_SEL); \
	(seg).base = exec_vmread(SEG_NAME##_BASE); \
	(seg).limit = exec_vmread32(SEG_NAME##_LIMIT); \
	(seg).attr = exec_vmread32(SEG_NAME##_ATTR); \
}

#define load_segment(seg, SEG_NAME) \
{ \
	exec_vmwrite16(SEG_NAME##_SEL, (seg).selector); \
	exec_vmwrite(SEG_NAME##_BASE, (seg).base); \
	exec_vmwrite32(SEG_NAME##_LIMIT, (seg).limit); \
	exec_vmwrite32(SEG_NAME##_ATTR, (seg).attr); \
}
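
/*
 * Illustrative usage, assuming the VMX_GUEST_CS_SEL/_BASE/_LIMIT/_ATTR family
 * of VMCS field names used elsewhere in ACRN:
 *
 *	save_segment(ectx->cs, VMX_GUEST_CS);	// snapshot guest CS out of the VMCS
 *	load_segment(ectx->cs, VMX_GUEST_CS);	// write it back on context restore
 */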

/* Segment constants for the guest */
#define REAL_MODE_BSP_INIT_CODE_SEL (0xf000U)
#define REAL_MODE_DATA_SEG_AR (0x0093U)
#define REAL_MODE_CODE_SEG_AR (0x009fU)
#define PROTECTED_MODE_DATA_SEG_AR (0xc093U)
#define PROTECTED_MODE_CODE_SEG_AR (0xc09bU)
#define REAL_MODE_SEG_LIMIT (0xffffU)
#define PROTECTED_MODE_SEG_LIMIT (0xffffffffU)
#define DR7_INIT_VALUE (0x400UL)
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */

#define foreach_vcpu(idx, vm, vcpu) \
	for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
		(idx) < (vm)->hw.created_vcpus; \
		(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
		if ((vcpu)->state != VCPU_OFFLINE)
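
/*
 * Illustrative usage: visit every created vCPU of a VM; the trailing
 * if-clause of the macro skips vCPUs that are offline:
 *
 *	uint16_t i;
 *	struct acrn_vcpu *vcpu;
 *
 *	foreach_vcpu(i, vm, vcpu) {
 *		kick_vcpu(vcpu);
 *	}
 */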

enum vcpu_state {
	VCPU_OFFLINE = 0U,
	VCPU_INIT,
	VCPU_RUNNING,
	VCPU_ZOMBIE,
};

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};

#define VCPU_EVENT_IOREQ 0
#define VCPU_EVENT_VIRTUAL_INTERRUPT 1
#define VCPU_EVENT_SYNC_WBINVD 2
#define VCPU_EVENT_SPLIT_LOCK 3
#define VCPU_EVENT_NUM 4


enum reset_mode;

/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
#define NORMAL_WORLD 0
#define SECURE_WORLD 1

#define NUM_WORLD_MSRS 2U
#define NUM_COMMON_MSRS 36U

#ifdef CONFIG_VCAT_ENABLED
#define NUM_CAT_L2_MSRS MAX_CACHE_CLOS_NUM_ENTRIES
#define NUM_CAT_L3_MSRS MAX_CACHE_CLOS_NUM_ENTRIES

/* L2/L3 mask MSRs plus MSR_IA32_PQR_ASSOC */
#define NUM_CAT_MSRS (NUM_CAT_L2_MSRS + NUM_CAT_L3_MSRS + 1U)

#else
#define NUM_CAT_MSRS 0U
#endif

#ifdef CONFIG_NVMX_ENABLED
#define FLEXIBLE_MSR_INDEX (NUM_WORLD_MSRS + NUM_COMMON_MSRS + NUM_VMX_MSRS)
#else
#define FLEXIBLE_MSR_INDEX (NUM_WORLD_MSRS + NUM_COMMON_MSRS)
#endif

#define NUM_EMULATED_MSRS (FLEXIBLE_MSR_INDEX + NUM_CAT_MSRS)
/* For the detailed layout of the emulated guest MSRs, see emulated_guest_msrs[NUM_EMULATED_MSRS] in vmsr.c */

#define EOI_EXIT_BITMAP_SIZE 256U

struct guest_cpu_context {
	struct run_context run_ctx;
	struct ext_context ext_ctx;

	/* per-world MSRs that need isolation between the secure and normal worlds */
	uint32_t world_msrs[NUM_WORLD_MSRS];
};

/* Intel SDM 24.8.2: the address must be 16-byte aligned */
struct msr_store_entry {
	uint32_t msr_index;
	uint32_t reserved;
	uint64_t value;
} __aligned(16);

#define MSR_AREA_COUNT 2 /* the maximum number of MSRs in the auto load/store area */

struct msr_store_area {
	struct msr_store_entry guest[MSR_AREA_COUNT];
	struct msr_store_entry host[MSR_AREA_COUNT];
	uint32_t index_of_pqr_assoc;
	uint32_t count; /* actual number of entries loaded/stored on VM entry/VM exit */
};
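
/*
 * Illustrative sketch (an assumption, not the authoritative setup code): the
 * guest[]/host[] arrays are filled in pairs and count tells the CPU how many
 * entries to process automatically on VM entry/exit, e.g. for MSR_IA32_TSC_AUX:
 *
 *	struct msr_store_area *area = &vcpu->arch.msr_area;
 *
 *	area->guest[0].msr_index = MSR_IA32_TSC_AUX;
 *	area->guest[0].value = vcpu->vcpu_id;
 *	area->host[0].msr_index = MSR_IA32_TSC_AUX;
 *	area->host[0].value = pcpuid_from_vcpu(vcpu);
 *	area->count = 1U;
 */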

struct iwkey {
	/* 256-bit encryption key */
	uint64_t encryption_key[4];
	/* 128-bit integrity key */
	uint64_t integrity_key[2];
};

struct acrn_vcpu_arch {
	/* vmcs region for this vcpu, MUST be 4KB-aligned. This is VMCS01 when nested VMX is enabled */
	uint8_t vmcs[PAGE_SIZE];

	/* context for nested virtualization, 4KB-aligned */
	struct acrn_nested nested;

	/* MSR bitmap region for this vcpu, MUST be 4KB-aligned */
	uint8_t msr_bitmap[PAGE_SIZE];

	/* per-vcpu lapic */
	struct acrn_vlapic vlapic;

	/* pid MUST be 64-byte aligned */
	struct pi_desc pid __aligned(64);

	struct acrn_vmtrr vmtrr;

	int32_t cur_context;
	struct guest_cpu_context contexts[NR_WORLD];

	/* common MSRs; world_msrs[] is a subset of this array */
	uint64_t guest_msrs[NUM_EMULATED_MSRS];

#define ALLOCATED_MIN_L1_VPID (0x10000U - CONFIG_MAX_VM_NUM * MAX_VCPUS_PER_VM)
	uint16_t vpid;

	/* Holds the information needed for IRQ/exception handling. */
	struct {
		/* The number of the exception to raise. */
		uint32_t exception;

		/* The error number for the exception. */
		uint32_t error;
	} exception_info;

	bool lapic_pt_enabled;
	bool irq_window_enabled;
	bool emulating_lock;
	bool xsave_enabled;

	/* VCPU context state information */
	uint32_t exit_reason;
	uint32_t idt_vectoring_info;
	uint64_t exit_qualification;
	uint32_t proc_vm_exec_ctrls;
	uint32_t inst_len;

	/* Information related to secondary / AP VCPU start-up */
	enum vm_cpu_mode cpu_mode;
	uint8_t nr_sipi;

	/* interrupt injection information */
	uint64_t pending_req;

	/* List of MSRs to be stored and loaded on VM exits or VM entries */
	struct msr_store_area msr_area;

	/* EOI_EXIT_BITMAP buffer, for the bitmap update */
	uint64_t eoi_exit_bitmap[EOI_EXIT_BITMAP_SIZE >> 6U];

	/* Keylocker */
	struct iwkey IWKey;
	bool cr4_kl_enabled;
	/*
	 * Keylocker spec 4.4:
	 * Bit 0 - Status of the most recent copy to or from IWKeyBackup.
	 * Bits 63:1 - Reserved.
	 */
	uint64_t iwkey_copy_status;
} __aligned(PAGE_SIZE);

struct acrn_vm;
struct acrn_vcpu {
	uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);

	/* Architecture specific definitions for this VCPU */
	struct acrn_vcpu_arch arch;
	uint16_t vcpu_id; /* virtual identifier for VCPU */
	struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */

	volatile enum vcpu_state state; /* State of this VCPU */

	struct thread_object thread_obj;
	bool launched; /* Whether the vcpu is launched on target pcpu */

	struct instr_emul_ctxt inst_ctxt;
	struct io_request req; /* used by io/ept emulation */

	uint64_t reg_cached;
	uint64_t reg_updated;

	struct sched_event events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);

struct vcpu_dump {
	struct acrn_vcpu *vcpu;
	char *str;
	uint32_t str_max;
};

struct guest_mem_dump {
	struct acrn_vcpu *vcpu;
	uint64_t gva;
	uint64_t len;
};

static inline bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
	return (vcpu->vcpu_id == BSP_CPU_ID);
}

static inline enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
	return vcpu->arch.cpu_mode;
}

/* do not update Guest RIP for next VM Enter */
static inline void vcpu_retain_rip(struct acrn_vcpu *vcpu)
{
	(vcpu)->arch.inst_len = 0U;
}
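
/*
 * Illustrative usage: a VM-exit handler that injects an exception and wants
 * the guest to re-execute the faulting instruction keeps RIP unchanged:
 *
 *	vcpu_inject_gp(vcpu, 0U);	// injection helper declared in the virq header
 *	vcpu_retain_rip(vcpu);		// next VM entry re-runs the instruction
 */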

static inline struct acrn_vlapic *vcpu_vlapic(struct acrn_vcpu *vcpu)
{
	return &(vcpu->arch.vlapic);
}

/**
 * @brief Get pointer to the posted-interrupt descriptor.
 *
 * @param[in] vcpu Target vCPU
 *
 * @return pointer to the posted-interrupt descriptor
 *
 * @pre vcpu != NULL
 */
static inline struct pi_desc *get_pi_desc(struct acrn_vcpu *vcpu)
{
	return &(vcpu->arch.pid);
}

uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);
void default_idle(__unused struct thread_object *obj);
void vcpu_thread(struct thread_object *obj);

int32_t vmx_vmrun(struct run_context *context, int32_t ops, int32_t ibrs);

/* External Interfaces */

/**
 * @brief get vcpu register value
 *
 * Get the value of one of the target vCPU's general-purpose registers from run_context.
 *
 * @param[in] vcpu pointer to vcpu data structure
 * @param[in] reg the register to read
 *
 * @return the value of the register.
 */
uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg);

/**
 * @brief set vcpu register value
 *
 * Set the value of one of the target vCPU's general-purpose registers in run_context.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] reg the register to write
 * @param[in] val the value to set for the register
 */
void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val);
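
/*
 * Illustrative usage, with register IDs from the cpu_reg_name enumeration in
 * asm/cpu.h, e.g. while emulating a CPUID leaf (eax_value is a placeholder):
 *
 *	uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
 *	...
 *	vcpu_set_gpreg(vcpu, CPU_REG_RAX, eax_value);
 */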

/**
 * @brief get vcpu RIP value
 *
 * Get & cache target vCPU's RIP in run_context.
 *
 * @param[in] vcpu pointer to vcpu data structure
 *
 * @return the value of RIP.
 */
uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu);

/**
 * @brief set vcpu RIP value
 *
 * Update target vCPU's RIP in run_context.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] val the value to set RIP to
 */
void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val);

/**
 * @brief get vcpu RSP value
 *
 * Get & cache target vCPU's RSP in run_context.
 *
 * @param[in] vcpu pointer to vcpu data structure
 *
 * @return the value of RSP.
 */
uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu);

/**
 * @brief set vcpu RSP value
 *
 * Update target vCPU's RSP in run_context.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] val the value to set RSP to
 */
void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val);

/**
 * @brief get vcpu EFER value
 *
 * Get & cache target vCPU's EFER in run_context.
 *
 * @param[in] vcpu pointer to vcpu data structure
 *
 * @return the value of EFER.
 */
uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu);

/**
 * @brief set vcpu EFER value
 *
 * Update target vCPU's EFER in run_context.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] val the value to set EFER to
 */
void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val);

/**
 * @brief get vcpu RFLAGS value
 *
 * Get & cache target vCPU's RFLAGS in run_context.
 *
 * @param[in] vcpu pointer to vcpu data structure
 *
 * @return the value of RFLAGS.
 */
uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu);

/**
 * @brief set vcpu RFLAGS value
 *
 * Update target vCPU's RFLAGS in run_context.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] val the value to set RFLAGS to
 */
void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val);

/**
 * @brief get guest emulated MSR
 *
 * Get the content of an emulated guest MSR.
 *
 * @param[in] vcpu pointer to vcpu data structure
 * @param[in] msr the guest MSR
 *
 * @return the value of the emulated MSR.
 */
uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr);

/**
 * @brief set guest emulated MSR
 *
 * Update the content of an emulated guest MSR.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] msr the guest MSR
 * @param[in] val the value to set the target MSR to
 */
void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val);

/**
 * @brief write eoi_exit_bitmap to VMCS fields
 *
 * @param[in] vcpu pointer to vcpu data structure
 */
void vcpu_set_vmcs_eoi_exit(const struct acrn_vcpu *vcpu);

/**
 * @brief reset all eoi_exit_bitmaps
 *
 * @param[in] vcpu pointer to vcpu data structure
 */
void vcpu_reset_eoi_exit_bitmaps(struct acrn_vcpu *vcpu);

/**
 * @brief set eoi_exit_bitmap bit
 *
 * Set the bit corresponding to vector in eoi_exit_bitmap.
 *
 * @param[in] vcpu pointer to vcpu data structure
 * @param[in] vector the interrupt vector
 */
void vcpu_set_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector);

/**
 * @brief clear eoi_exit_bitmap bit
 *
 * Clear the bit corresponding to vector in eoi_exit_bitmap.
 *
 * @param[in] vcpu pointer to vcpu data structure
 * @param[in] vector the interrupt vector
 */
void vcpu_clear_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector);
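
/*
 * Illustrative flow (a sketch of the typical calling sequence): mark a vector
 * in the bitmap, then request the vCPU to flush the change into the VMCS:
 *
 *	vcpu_set_eoi_exit_bitmap(vcpu, vector);
 *	vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
 */
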
/**
 * @brief set all the vcpu registers
 *
 * Update all of the target vCPU's registers in run_context.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] vcpu_regs the values for all the registers
 */
void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_regs *vcpu_regs);

/**
 * @brief reset all the vcpu registers
 *
 * Reset all of the target vCPU's registers in run_context to their initial values.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] mode the reset mode
 */
void reset_vcpu_regs(struct acrn_vcpu *vcpu, enum reset_mode mode);

bool sanitize_cr0_cr4_pattern(void);

/**
 * @brief Initialize the protected mode vcpu registers
 *
 * Initialize all of the vCPU's registers in run_context to their initial protected mode values.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] vgdt_base_gpa guest physical address of the guest's GDT
 */
void init_vcpu_protect_mode_regs(struct acrn_vcpu *vcpu, uint64_t vgdt_base_gpa);

/**
 * @brief set the vCPU startup entry
 *
 * Set target vCPU's startup entry in run_context.
 *
 * @param[inout] vcpu pointer to vCPU data structure
 * @param[in] entry startup entry for the vCPU
 */
void set_vcpu_startup_entry(struct acrn_vcpu *vcpu, uint64_t entry);

static inline bool is_long_mode(struct acrn_vcpu *vcpu)
{
	return (vcpu_get_efer(vcpu) & MSR_IA32_EFER_LMA_BIT) != 0UL;
}

static inline bool is_paging_enabled(struct acrn_vcpu *vcpu)
{
	return (vcpu_get_cr0(vcpu) & CR0_PG) != 0UL;
}

static inline bool is_pae(struct acrn_vcpu *vcpu)
{
	return (vcpu_get_cr4(vcpu) & CR4_PAE) != 0UL;
}
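
/*
 * Illustrative sketch: together these predicates identify the guest paging
 * mode, e.g. for a software page walk:
 *
 *	if (!is_paging_enabled(vcpu)) {
 *		// no translation: GVA equals GPA
 *	} else if (is_long_mode(vcpu)) {
 *		// 4-level paging
 *	} else if (is_pae(vcpu)) {
 *		// PAE paging
 *	} else {
 *		// 32-bit paging
 *	}
 */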

struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id);
struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id);

void save_xsave_area(struct acrn_vcpu *vcpu, struct ext_context *ectx);
void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *ectx);
void load_iwkey(struct acrn_vcpu *vcpu);

/**
 * @brief create a vcpu for the target vm
 *
 * Creates/allocates a vCPU instance and initializes its vcpu_id,
 * vpid, vmcs, vlapic, etc. It sets the initial vCPU state to VCPU_INIT.
 *
 * @param[in] pcpu_id the pcpu on which the created vcpu will run
 * @param[in] vm pointer to vm data structure; this vcpu will be owned by this vm.
 * @param[out] rtn_vcpu_handle pointer to the created vcpu
 *
 * @retval 0 vcpu created successfully, other values failed.
 */
int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle);

/**
 * @brief run into non-root mode based on vcpu setting
 *
 * An interface in the vCPU thread that implements VM entry and VM exit.
 * The pCPU switches between VMX root mode and non-root mode through it.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @pre vcpu != NULL
 *
 * @retval 0 vcpu ran successfully, other values failed.
 */
int32_t run_vcpu(struct acrn_vcpu *vcpu);

/**
 * @brief unmap the vcpu from its pcpu and free its vlapic
 *
 * Unmap the vcpu from its pcpu, free its vlapic, and set the vcpu state to offline.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @pre vcpu != NULL
 * @pre vcpu->state == VCPU_ZOMBIE
 */
void offline_vcpu(struct acrn_vcpu *vcpu);

/**
 * @brief reset vcpu state and values
 *
 * Reset all fields in a vCPU instance; the vCPU state is reset to VCPU_INIT.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] mode the reset mode
 * @pre vcpu != NULL
 * @pre vcpu->state == VCPU_ZOMBIE
 */
void reset_vcpu(struct acrn_vcpu *vcpu, enum reset_mode mode);

/**
 * @brief pause the vcpu and set new state
 *
 * Change a vCPU state to VCPU_ZOMBIE and make a reschedule request for it.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] new_state the state to set the vcpu to
 */
void zombie_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state);

/**
 * @brief set the vcpu to running state, after which it will be scheduled
 *
 * Add a vCPU to the run queue and make a reschedule request for it. It sets the vCPU state to VCPU_RUNNING.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @pre vcpu != NULL
 * @pre vcpu->state == VCPU_INIT
 */
void launch_vcpu(struct acrn_vcpu *vcpu);

/**
 * @brief kick the vcpu and let it handle pending events
 *
 * Kick a vCPU to handle its pending events.
 *
 * @param[in] vcpu pointer to vcpu data structure
 */
void kick_vcpu(struct acrn_vcpu *vcpu);

/**
 * @brief create a vcpu for the vm and map it to the pcpu
 *
 * Create a vcpu for the vm and map it to the given pcpu.
 *
 * @param[inout] vm pointer to vm data structure
 * @param[in] pcpu_id the pcpu to which the vcpu will be mapped
 *
 * @retval 0 on success
 * @retval -EINVAL if the vCPU ID is invalid
 */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id);
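
/*
 * Illustrative lifecycle sketch (cpu_affinity as the bitmap source is an
 * assumption): vCPUs are prepared once per assigned pCPU, then launched when
 * the guest BSP/APs are started:
 *
 *	uint64_t bitmap = vm_config->cpu_affinity;	// hypothetical source
 *
 *	while (bitmap != 0UL) {
 *		uint16_t pcpu_id = ffs64(bitmap);
 *		bitmap &= ~(1UL << pcpu_id);
 *		if (prepare_vcpu(vm, pcpu_id) != 0) {
 *			break;
 *		}
 *	}
 */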

/**
 * @brief get physical destination cpu mask
 *
 * Get the physical destination cpu mask corresponding to the given vm and virtual destination cpu mask.
 *
 * @param[in] vm pointer to vm data structure
 * @param[in] vdmask virtual destination cpu mask
 *
 * @return the physical destination CPU mask
 */
uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask);

/**
 * @brief Check if the vCPU's LAPIC is in x2APIC mode and the VM it belongs to
 * is configured for LAPIC pass-through
 *
 * @pre vcpu != NULL
 *
 * @return true if the vCPU's LAPIC is in x2APIC mode and the VM it belongs to
 * is configured for LAPIC pass-through
 */
static inline bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu)
{
	return vcpu->arch.lapic_pt_enabled;
}

/**
 * @brief handle posted interrupts
 *
 * VT-d posted-interrupt handler: find the vCPU corresponding to this notification
 * and wake it up if the ON bit of the associated posted-interrupt descriptor is set.
 *
 * @param[in] vcpu_index a zero-based index of where the vCPU is located in the vCPU list for the current pCPU
 * @pre vcpu_index < CONFIG_MAX_VM_NUM
 */
void vcpu_handle_pi_notification(uint32_t vcpu_index);

/**
 * @brief Update the state of the vCPU and the state of its vlapic
 *
 * The vlapic state of the VM shall be updated for some vCPU
 * state transitions, such as from VCPU_INIT to VCPU_RUNNING.
 *
 * @param[inout] vcpu pointer to vcpu data structure
 * @param[in] new_state the state to set the vcpu to
 *
 * @pre (vcpu != NULL)
 */
void vcpu_set_state(struct acrn_vcpu *vcpu, enum vcpu_state new_state);

/**
 * @}
 */
/* End of acrn_vcpu */

#endif /* ASSEMBLER */

#endif /* VCPU_H */