Lines matching refs:vcpu

22 static void clear_vvmcs(struct acrn_vcpu *vcpu, struct acrn_vvmcs *vvmcs);
80 void init_vmx_msrs(struct acrn_vcpu *vcpu) in init_vmx_msrs() argument
85 if (is_nvmx_configured(vcpu->vm)) { in init_vmx_msrs()
92 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_BASIC, val64.full); in init_vmx_msrs()
109 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_MISC, val64.full); in init_vmx_msrs()
124 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_PINBASED_CTLS, msr_value); in init_vmx_msrs()
126 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS, msr_value); in init_vmx_msrs()
139 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS, msr_value); in init_vmx_msrs()
141 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS, msr_value); in init_vmx_msrs()
151 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2, msr_value); in init_vmx_msrs()
158 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_EXIT_CTLS, msr_value); in init_vmx_msrs()
160 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_TRUE_EXIT_CTLS, msr_value); in init_vmx_msrs()
167 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_ENTRY_CTLS, msr_value); in init_vmx_msrs()
169 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS, msr_value); in init_vmx_msrs()
177 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_EPT_VPID_CAP, msr_value); in init_vmx_msrs()
181 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_CR0_FIXED0, msr_value); in init_vmx_msrs()
184 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_CR0_FIXED1, msr_value); in init_vmx_msrs()
187 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_CR4_FIXED0, msr_value); in init_vmx_msrs()
190 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_CR4_FIXED1, msr_value); in init_vmx_msrs()
193 vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, msr_value); in init_vmx_msrs()
200 int32_t read_vmx_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *val) in read_vmx_msr() argument
205 if (is_nvmx_configured(vcpu->vm)) { in read_vmx_msr()
225 v = vcpu_get_guest_msr(vcpu, msr); in read_vmx_msr()
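
The init_vmx_msrs() and read_vmx_msr() references above show the hypervisor publishing virtualized VMX capability MSRs to the guest through vcpu_set_guest_msr()/vcpu_get_guest_msr(). As a hedged illustration of what such a value looks like, the standalone sketch below packs an IA32_VMX_BASIC-style value from its architectural fields (bit layout per the Intel SDM); the macro and function names are hypothetical, not ACRN's.

/* Illustrative sketch: composing a virtual IA32_VMX_BASIC value.
 * Field positions follow the Intel SDM; all names here are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define VMX_BASIC_REVISION_ID(id)  ((uint64_t)(id) & 0x7fffffffULL)        /* bits 30:0  */
#define VMX_BASIC_VMCS_SIZE(sz)    (((uint64_t)(sz) & 0x1fffULL) << 32U)   /* bits 44:32 */
#define VMX_BASIC_MEM_TYPE_WB      (6ULL << 50U)                           /* bits 53:50 */
#define VMX_BASIC_TRUE_CTLS        (1ULL << 55U)                           /* bit 55     */

static uint64_t make_virtual_vmx_basic(uint32_t revision_id)
{
	/* Report a 4 KiB VMCS region, write-back memory type for VMCS accesses,
	 * and support for the "true" capability MSRs. */
	return VMX_BASIC_REVISION_ID(revision_id) | VMX_BASIC_VMCS_SIZE(0x1000U)
		| VMX_BASIC_MEM_TYPE_WB | VMX_BASIC_TRUE_CTLS;
}

int main(void)
{
	printf("IA32_VMX_BASIC = 0x%016llx\n",
		(unsigned long long)make_virtual_vmx_basic(0x12U));
	return 0;
}
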
563 static uint64_t get_vmx_memory_operand(struct acrn_vcpu *vcpu, uint32_t instr_info) in get_vmx_memory_operand() argument
575 offset = vcpu->arch.exit_qualification; in get_vmx_memory_operand()
585 offset += vcpu_get_gpreg(vcpu, VMX_II_BASE_REG(instr_info)); in get_vmx_memory_operand()
589 uint64_t val64 = vcpu_get_gpreg(vcpu, VMX_II_IDX_REG(instr_info)); in get_vmx_memory_operand()
609 (void)gva2gpa(vcpu, gva, &gpa, &err_code); in get_vmx_memory_operand()
617 static uint64_t get_vmptr_gpa(struct acrn_vcpu *vcpu) in get_vmptr_gpa() argument
622 gpa = get_vmx_memory_operand(vcpu, exec_vmread(VMX_INSTR_INFO)); in get_vmptr_gpa()
625 (void)copy_from_gpa(vcpu->vm, (void *)&vmptr, gpa, sizeof(uint64_t)); in get_vmptr_gpa()
641 static bool validate_vmcs_revision_id(struct acrn_vcpu *vcpu, uint64_t vmptr_gpa) in validate_vmcs_revision_id() argument
645 (void)copy_from_gpa(vcpu->vm, (void *)&revision_id, vmptr_gpa, sizeof(uint32_t)); in validate_vmcs_revision_id()
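
get_vmx_memory_operand() rebuilds the guest-virtual address of a VMX instruction's memory operand from the VM-exit instruction-information field plus the displacement held in the exit qualification, then translates it with gva2gpa(); get_vmptr_gpa() and validate_vmcs_revision_id() use that address to fetch the VMXON/VMCS pointer and its revision ID from guest memory. Below is a minimal sketch of the effective-address arithmetic only, assuming the base/index/scaling fields have already been decoded (the decode itself is done by the VMX_II_* macros in the real handler).

/* Sketch of the effective-address math behind get_vmx_memory_operand().
 * Decoded fields are passed in directly; in the real handler they come
 * from the VM-exit instruction-information field. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct vmx_mem_operand {
	bool     has_base;
	bool     has_index;
	uint64_t base;          /* value of the base register      */
	uint64_t index;         /* value of the index register     */
	uint32_t scaling;       /* 0..3, shift count for the index */
	uint64_t displacement;  /* from the exit qualification     */
};

static uint64_t operand_gva(const struct vmx_mem_operand *op)
{
	uint64_t gva = op->displacement;

	if (op->has_base) {
		gva += op->base;
	}
	if (op->has_index) {
		gva += op->index << op->scaling;
	}
	return gva;
}

int main(void)
{
	struct vmx_mem_operand op = {
		.has_base = true, .base = 0x7000U,
		.has_index = true, .index = 2U, .scaling = 3U,
		.displacement = 0x10U,
	};
	printf("gva = 0x%llx\n", (unsigned long long)operand_gva(&op));
	return 0;
}
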
692 static bool validate_nvmx_cr0(struct acrn_vcpu *vcpu) in validate_nvmx_cr0() argument
694 return validate_nvmx_cr0_cr4(vcpu_get_cr0(vcpu), msr_read(MSR_IA32_VMX_CR0_FIXED0), in validate_nvmx_cr0()
701 static bool validate_nvmx_cr4(struct acrn_vcpu *vcpu) in validate_nvmx_cr4() argument
703 return validate_nvmx_cr0_cr4(vcpu_get_cr4(vcpu), msr_read(MSR_IA32_VMX_CR4_FIXED0), in validate_nvmx_cr4()
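
validate_nvmx_cr0()/validate_nvmx_cr4() apply the architectural fixed-bit rule: every bit set in IA32_VMX_CRx_FIXED0 must be set in the control register, and every bit clear in IA32_VMX_CRx_FIXED1 must be clear. A self-contained version of that predicate, with illustrative values only:

/* Sketch of the CR0/CR4 fixed-bit rule checked before allowing VMXON. */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool cr_satisfies_vmx_fixed_bits(uint64_t cr, uint64_t fixed0, uint64_t fixed1)
{
	/* FIXED0: 1-bits are mandatory; FIXED1: 0-bits are forbidden. */
	return ((cr & fixed0) == fixed0) && ((cr & ~fixed1) == 0ULL);
}

int main(void)
{
	/* Illustrative values: bit 0 mandatory, only the low 32 bits allowed. */
	assert(cr_satisfies_vmx_fixed_bits(0x1ULL, 0x1ULL, 0xffffffffULL));
	assert(!cr_satisfies_vmx_fixed_bits(0x0ULL, 0x1ULL, 0xffffffffULL));
	return 0;
}
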
710 static void reset_vvmcs(struct acrn_vcpu *vcpu) in reset_vvmcs() argument
715 vcpu->arch.nested.current_vvmcs = NULL; in reset_vvmcs()
718 vvmcs = &vcpu->arch.nested.vvmcs[idx]; in reset_vvmcs()
732 int32_t vmxon_vmexit_handler(struct acrn_vcpu *vcpu) in vmxon_vmexit_handler() argument
737 if (is_nvmx_configured(vcpu->vm)) { in vmxon_vmexit_handler()
738 if (((vcpu_get_cr0(vcpu) & CR0_PE) == 0UL) in vmxon_vmexit_handler()
739 || ((vcpu_get_cr4(vcpu) & CR4_VMXE) == 0UL) in vmxon_vmexit_handler()
740 || ((vcpu_get_rflags(vcpu) & RFLAGS_VM) != 0U)) { in vmxon_vmexit_handler()
741 vcpu_inject_ud(vcpu); in vmxon_vmexit_handler()
742 } else if (((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LMA_BIT) == 0U) in vmxon_vmexit_handler()
745 vcpu_inject_ud(vcpu); in vmxon_vmexit_handler()
747 || !validate_nvmx_cr0(vcpu) in vmxon_vmexit_handler()
748 || !validate_nvmx_cr4(vcpu) in vmxon_vmexit_handler()
749 || ((vcpu_get_guest_msr(vcpu, MSR_IA32_FEATURE_CONTROL) & features) != features)) { in vmxon_vmexit_handler()
750 vcpu_inject_gp(vcpu, 0U); in vmxon_vmexit_handler()
751 } else if (vcpu->arch.nested.vmxon == true) { in vmxon_vmexit_handler()
754 uint64_t vmptr_gpa = get_vmptr_gpa(vcpu); in vmxon_vmexit_handler()
758 } else if (!validate_vmcs_revision_id(vcpu, vmptr_gpa)) { in vmxon_vmexit_handler()
761 vcpu->arch.nested.vmxon = true; in vmxon_vmexit_handler()
762 vcpu->arch.nested.in_l2_guest = false; in vmxon_vmexit_handler()
763 vcpu->arch.nested.vmxon_ptr = vmptr_gpa; in vmxon_vmexit_handler()
765 reset_vvmcs(vcpu); in vmxon_vmexit_handler()
770 vcpu_inject_ud(vcpu); in vmxon_vmexit_handler()
779 bool check_vmx_permission(struct acrn_vcpu *vcpu) in check_vmx_permission() argument
784 if ((vcpu->arch.nested.vmxon == false) in check_vmx_permission()
785 || ((vcpu_get_cr0(vcpu) & CR0_PE) == 0UL) in check_vmx_permission()
786 || ((vcpu_get_rflags(vcpu) & RFLAGS_VM) != 0U)) { in check_vmx_permission()
788 vcpu_inject_ud(vcpu); in check_vmx_permission()
791 vcpu_inject_gp(vcpu, 0U); in check_vmx_permission()
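
check_vmx_permission() is the common gate for the VMX-instruction exit handlers: #UD when the vCPU is not in VMX operation, not in protected mode, or running in virtual-8086 mode, and #GP(0) for the remaining privilege failure. The standalone sketch below mirrors that decision; the struct and inject_* stubs are stand-ins, and the CPL test is an assumption about the privilege check rather than ACRN's exact condition.

/* Sketch of the permission gate applied before emulating any VMX
 * instruction. Types and inject_* helpers are illustrative stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_vcpu {
	bool     vmxon;    /* has the guest executed VMXON? */
	uint64_t cr0;
	uint64_t rflags;
	uint32_t cpl;      /* current privilege level       */
};

#define CR0_PE     (1UL << 0)
#define RFLAGS_VM  (1UL << 17)

static void inject_ud(struct fake_vcpu *v) { (void)v; puts("#UD"); }
static void inject_gp(struct fake_vcpu *v) { (void)v; puts("#GP(0)"); }

static bool vmx_permission_ok(struct fake_vcpu *v)
{
	if (!v->vmxon || ((v->cr0 & CR0_PE) == 0UL) || ((v->rflags & RFLAGS_VM) != 0UL)) {
		inject_ud(v);          /* outside VMX operation / wrong mode */
		return false;
	}
	if (v->cpl != 0U) {
		inject_gp(v);          /* assumed ring-0-only privilege rule */
		return false;
	}
	return true;
}

int main(void)
{
	struct fake_vcpu v = { .vmxon = true, .cr0 = CR0_PE, .rflags = 0x2UL, .cpl = 0U };
	printf("ok = %d\n", vmx_permission_ok(&v));
	return 0;
}
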
802 int32_t vmxoff_vmexit_handler(struct acrn_vcpu *vcpu) in vmxoff_vmexit_handler() argument
804 if (check_vmx_permission(vcpu)) { in vmxoff_vmexit_handler()
807 vcpu->arch.nested.vmxon = false; in vmxoff_vmexit_handler()
808 vcpu->arch.nested.in_l2_guest = false; in vmxoff_vmexit_handler()
810 reset_vvmcs(vcpu); in vmxoff_vmexit_handler()
831 static struct acrn_vvmcs *lookup_vvmcs(struct acrn_vcpu *vcpu, uint64_t vmcs12_gpa) in lookup_vvmcs() argument
837 if (vcpu->arch.nested.vvmcs[idx].vmcs12_gpa == vmcs12_gpa) { in lookup_vvmcs()
838 vvmcs = &vcpu->arch.nested.vvmcs[idx]; in lookup_vvmcs()
849 static struct acrn_vvmcs *get_or_replace_vvmcs_entry(struct acrn_vcpu *vcpu) in get_or_replace_vvmcs_entry() argument
851 struct acrn_nested *nested = &vcpu->arch.nested; in get_or_replace_vvmcs_entry()
874 clear_vvmcs(vcpu, vvmcs); in get_or_replace_vvmcs_entry()
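
lookup_vvmcs() and get_or_replace_vvmcs_entry() maintain a small cache of guest VMCS12 structures keyed by guest-physical address, evicting an existing entry through clear_vvmcs() when no slot is free. A toy version of that lookup-or-evict pattern follows; the array size and eviction policy are illustrative, not ACRN's.

/* Sketch of a tiny VMCS12 cache keyed by guest-physical address. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define INVALID_GPA  (~0ULL)
#define MAX_VVMCS    4U

struct toy_vvmcs {
	uint64_t vmcs12_gpa;
	/* ... cached VMCS12 contents would live here ... */
};

static struct toy_vvmcs cache[MAX_VVMCS];

static void cache_init(void)
{
	for (size_t i = 0U; i < MAX_VVMCS; i++) {
		cache[i].vmcs12_gpa = INVALID_GPA;
	}
}

static struct toy_vvmcs *lookup_or_replace(uint64_t gpa)
{
	struct toy_vvmcs *victim = &cache[0];

	for (size_t i = 0U; i < MAX_VVMCS; i++) {
		if (cache[i].vmcs12_gpa == gpa) {
			return &cache[i];              /* already cached     */
		}
		if (cache[i].vmcs12_gpa == INVALID_GPA) {
			victim = &cache[i];            /* prefer a free slot */
		}
	}
	/* A real implementation flushes the evicted entry back to guest
	 * memory (clear_vvmcs()) before reusing the slot. */
	victim->vmcs12_gpa = gpa;
	return victim;
}

int main(void)
{
	cache_init();
	printf("cached at slot %td\n", lookup_or_replace(0x1000ULL) - cache);
	return 0;
}
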
889 int32_t vmread_vmexit_handler(struct acrn_vcpu *vcpu) in vmread_vmexit_handler() argument
891 struct acrn_vvmcs *cur_vvmcs = vcpu->arch.nested.current_vvmcs; in vmread_vmexit_handler()
896 if (check_vmx_permission(vcpu)) { in vmread_vmexit_handler()
901 vmcs_field = (uint32_t)vcpu_get_gpreg(vcpu, VMX_II_REG2(info)); in vmread_vmexit_handler()
906 vcpu_set_gpreg(vcpu, VMX_II_REG1(info), vmcs_value); in vmread_vmexit_handler()
908 gpa = get_vmx_memory_operand(vcpu, info); in vmread_vmexit_handler()
909 (void)copy_to_gpa(vcpu->vm, &vmcs_value, gpa, 8U); in vmread_vmexit_handler()
924 int32_t vmwrite_vmexit_handler(struct acrn_vcpu *vcpu) in vmwrite_vmexit_handler() argument
926 struct acrn_vvmcs *cur_vvmcs = vcpu->arch.nested.current_vvmcs; in vmwrite_vmexit_handler()
931 if (check_vmx_permission(vcpu)) { in vmwrite_vmexit_handler()
936 vmcs_field = (uint32_t)vcpu_get_gpreg(vcpu, VMX_II_REG2(info)); in vmwrite_vmexit_handler()
939 ((vcpu_get_guest_msr(vcpu, MSR_IA32_VMX_MISC) & (1UL << 29U)) == 0UL)) { in vmwrite_vmexit_handler()
944 vmcs_value = vcpu_get_gpreg(vcpu, VMX_II_REG1(info)); in vmwrite_vmexit_handler()
946 gpa = get_vmx_memory_operand(vcpu, info); in vmwrite_vmexit_handler()
947 (void)copy_from_gpa(vcpu->vm, &vmcs_value, gpa, 8U); in vmwrite_vmexit_handler()
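
vmread_vmexit_handler()/vmwrite_vmexit_handler() take the VMCS-field encoding from one register operand and move the value either through a second register or through the guest-memory operand resolved by get_vmx_memory_operand(); the check at line 939 additionally rejects VMWRITE to a read-only (VM-exit information) field unless IA32_VMX_MISC bit 29 advertises that capability. A small sketch of that gate, with the read-only classification passed in rather than decoded from the field encoding:

/* Sketch of the VMWRITE gate on read-only (VM-exit information) fields:
 * they may only be written when IA32_VMX_MISC bit 29 is reported. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VMX_MISC_VMWRITE_ALL  (1ULL << 29)

static bool vmwrite_allowed(bool field_is_readonly, uint64_t vmx_misc)
{
	return !field_is_readonly || ((vmx_misc & VMX_MISC_VMWRITE_ALL) != 0ULL);
}

int main(void)
{
	printf("%d %d\n",
		vmwrite_allowed(true, 0ULL),                  /* 0: rejected */
		vmwrite_allowed(true, VMX_MISC_VMWRITE_ALL)); /* 1: allowed  */
	return 0;
}
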
1000 static void merge_and_sync_control_fields(struct acrn_vcpu *vcpu, struct acrn_vmcs12 *vmcs12) in merge_and_sync_control_fields() argument
1006 exec_vmwrite(VMX_MSR_BITMAP_FULL, gpa2hpa(vcpu->vm, vmcs12->msr_bitmap)); in merge_and_sync_control_fields()
1017 exec_vmwrite(VMX_GUEST_IA32_EFER_FULL, vcpu_get_efer(vcpu)); in merge_and_sync_control_fields()
1035 static void sync_vmcs12_to_vmcs02(struct acrn_vcpu *vcpu, struct acrn_vmcs12 *vmcs12) in sync_vmcs12_to_vmcs02() argument
1045 merge_and_sync_control_fields(vcpu, vmcs12); in sync_vmcs12_to_vmcs02()
1112 static void clear_vvmcs(struct acrn_vcpu *vcpu, struct acrn_vvmcs *vvmcs) in clear_vvmcs() argument
1130 (void)copy_to_gpa(vcpu->vm, (void *)&vvmcs->vmcs12, vvmcs->vmcs12_gpa, sizeof(struct acrn_vmcs12)); in clear_vvmcs()
1152 int32_t vmptrld_vmexit_handler(struct acrn_vcpu *vcpu) in vmptrld_vmexit_handler() argument
1154 struct acrn_nested *nested = &vcpu->arch.nested; in vmptrld_vmexit_handler()
1158 if (check_vmx_permission(vcpu)) { in vmptrld_vmexit_handler()
1159 vmcs12_gpa = get_vmptr_gpa(vcpu); in vmptrld_vmexit_handler()
1165 } else if (!validate_vmcs_revision_id(vcpu, vmcs12_gpa)) { in vmptrld_vmexit_handler()
1171 vvmcs = lookup_vvmcs(vcpu, vmcs12_gpa); in vmptrld_vmexit_handler()
1173 vvmcs = get_or_replace_vvmcs_entry(vcpu); in vmptrld_vmexit_handler()
1188 (void)copy_from_gpa(vcpu->vm, (void *)&vvmcs->vmcs12, vmcs12_gpa, in vmptrld_vmexit_handler()
1195 sync_vmcs12_to_vmcs02(vcpu, &vvmcs->vmcs12); in vmptrld_vmexit_handler()
1210 load_va_vmcs(vcpu->arch.vmcs); in vmptrld_vmexit_handler()
1227 int32_t vmclear_vmexit_handler(struct acrn_vcpu *vcpu) in vmclear_vmexit_handler() argument
1229 struct acrn_nested *nested = &vcpu->arch.nested; in vmclear_vmexit_handler()
1233 if (check_vmx_permission(vcpu)) { in vmclear_vmexit_handler()
1234 vmcs12_gpa = get_vmptr_gpa(vcpu); in vmclear_vmexit_handler()
1241 vvmcs = lookup_vvmcs(vcpu, vmcs12_gpa); in vmclear_vmexit_handler()
1252 clear_vvmcs(vcpu, vvmcs); in vmclear_vmexit_handler()
1255 load_va_vmcs(vcpu->arch.vmcs); in vmclear_vmexit_handler()
1278 (void)copy_to_gpa(vcpu->vm, &launch_state, vmcs12_gpa + in vmclear_vmexit_handler()
1292 bool is_vcpu_in_l2_guest(struct acrn_vcpu *vcpu) in is_vcpu_in_l2_guest() argument
1294 return vcpu->arch.nested.in_l2_guest; in is_vcpu_in_l2_guest()
1312 static void set_vmcs01_guest_state(struct acrn_vcpu *vcpu) in set_vmcs01_guest_state() argument
1329 struct acrn_vmcs12 *vmcs12 = &vcpu->arch.nested.current_vvmcs->vmcs12; in set_vmcs01_guest_state()
1332 if (vcpu->arch.nested.current_vvmcs->host_state_dirty == true) { in set_vmcs01_guest_state()
1333 vcpu->arch.nested.current_vvmcs->host_state_dirty = false; in set_vmcs01_guest_state()
1341 bitmap_clear_nolock(CPU_REG_CR0, &vcpu->reg_cached); in set_vmcs01_guest_state()
1342 bitmap_clear_nolock(CPU_REG_CR4, &vcpu->reg_cached); in set_vmcs01_guest_state()
1402 vcpu_set_rip(vcpu, vmcs12->host_rip); in set_vmcs01_guest_state()
1403 vcpu_set_rsp(vcpu, vmcs12->host_rsp); in set_vmcs01_guest_state()
1404 vcpu_set_rflags(vcpu, 0x2U); in set_vmcs01_guest_state()
1424 int32_t nested_vmexit_handler(struct acrn_vcpu *vcpu) in nested_vmexit_handler() argument
1426 struct acrn_vvmcs *cur_vvmcs = vcpu->arch.nested.current_vvmcs; in nested_vmexit_handler()
1429 if ((vcpu->arch.exit_reason & 0xFFFFU) == VMX_EXIT_REASON_EPT_VIOLATION) { in nested_vmexit_handler()
1430 is_l1_vmexit = handle_l2_ept_violation(vcpu); in nested_vmexit_handler()
1444 load_va_vmcs(vcpu->arch.vmcs); in nested_vmexit_handler()
1447 set_vmcs01_guest_state(vcpu); in nested_vmexit_handler()
1450 vcpu->arch.nested.in_l2_guest = false; in nested_vmexit_handler()
1463 vcpu_retain_rip(vcpu); in nested_vmexit_handler()
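
nested_vmexit_handler() together with set_vmcs01_guest_state() reflects an L2 VM exit back to L1: the hypervisor reloads VMCS01, installs the host-state area of the cached vmcs12 as L1's new guest state (RIP/RSP from host_rip/host_rsp, RFLAGS reset to the architectural value 0x2), clears in_l2_guest, and retains RIP so L1 resumes in its own VM-exit handler. A compact sketch of that state hand-off, using simplified stand-in types:

/* Sketch of reflecting an L2 VM exit to L1: copy the vmcs12 host-state
 * fields into what will become L1's guest state. Simplified types. */
#include <stdint.h>
#include <stdio.h>

struct toy_vmcs12 {
	uint64_t host_rip;
	uint64_t host_rsp;
	uint64_t host_cr3;
};

struct toy_l1_state {
	uint64_t rip;
	uint64_t rsp;
	uint64_t cr3;
	uint64_t rflags;
	int      in_l2_guest;
};

static void reflect_vmexit_to_l1(const struct toy_vmcs12 *vmcs12, struct toy_l1_state *l1)
{
	l1->rip    = vmcs12->host_rip;   /* L1 resumes at its VM-exit handler  */
	l1->rsp    = vmcs12->host_rsp;
	l1->cr3    = vmcs12->host_cr3;
	l1->rflags = 0x2UL;              /* architectural RFLAGS after VM exit */
	l1->in_l2_guest = 0;
}

int main(void)
{
	struct toy_vmcs12 v = { .host_rip = 0xffffffff81000000ULL, .host_rsp = 0x8000ULL, .host_cr3 = 0x1000ULL };
	struct toy_l1_state l1 = { 0 };

	reflect_vmexit_to_l1(&v, &l1);
	printf("L1 rip = 0x%llx\n", (unsigned long long)l1.rip);
	return 0;
}
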
1471 static void nested_vmentry(struct acrn_vcpu *vcpu, bool is_launch) in nested_vmentry() argument
1473 struct acrn_vvmcs *cur_vvmcs = vcpu->arch.nested.current_vvmcs; in nested_vmentry()
1501 merge_and_sync_control_fields(vcpu, vmcs12); in nested_vmentry()
1505 vcpu->arch.nested.in_l2_guest = true; in nested_vmentry()
1517 vcpu->launched = false; in nested_vmentry()
1524 int32_t vmresume_vmexit_handler(struct acrn_vcpu *vcpu) in vmresume_vmexit_handler() argument
1526 if (check_vmx_permission(vcpu)) { in vmresume_vmexit_handler()
1527 nested_vmentry(vcpu, false); in vmresume_vmexit_handler()
1536 int32_t vmlaunch_vmexit_handler(struct acrn_vcpu *vcpu) in vmlaunch_vmexit_handler() argument
1538 if (check_vmx_permission(vcpu)) { in vmlaunch_vmexit_handler()
1539 nested_vmentry(vcpu, true); in vmlaunch_vmexit_handler()
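
nested_vmentry() backs both vmlaunch_vmexit_handler() and vmresume_vmexit_handler(): after the permission check it re-merges the vmcs12 control fields into VMCS02, marks the vCPU as in_l2_guest, and clears vcpu->launched so the next hardware VM entry launches VMCS02. Architecturally, VMLAUNCH also requires the current VMCS to be in the clear launch state while VMRESUME requires one that was already launched; the sketch below states that SDM rule, not ACRN's exact code path.

/* Sketch of the VMLAUNCH/VMRESUME launch-state rule. */
#include <stdbool.h>
#include <stdio.h>

enum launch_state { VMCS_CLEAR, VMCS_LAUNCHED };

static bool vmentry_allowed(enum launch_state st, bool is_launch)
{
	return is_launch ? (st == VMCS_CLEAR) : (st == VMCS_LAUNCHED);
}

int main(void)
{
	printf("VMLAUNCH on clear VMCS: %d\n", vmentry_allowed(VMCS_CLEAR, true));
	printf("VMRESUME on clear VMCS: %d\n", vmentry_allowed(VMCS_CLEAR, false));
	printf("VMRESUME after launch:  %d\n", vmentry_allowed(VMCS_LAUNCHED, false));
	return 0;
}
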
1549 int64_t get_invvpid_ept_operands(struct acrn_vcpu *vcpu, void *desc, size_t size) in get_invvpid_ept_operands() argument
1554 gpa = get_vmx_memory_operand(vcpu, info); in get_invvpid_ept_operands()
1555 (void)copy_from_gpa(vcpu->vm, desc, gpa, size); in get_invvpid_ept_operands()
1557 return vcpu_get_gpreg(vcpu, VMX_II_REG2(info)); in get_invvpid_ept_operands()
1563 static bool validate_canonical_addr(struct acrn_vcpu *vcpu, uint64_t va) in validate_canonical_addr() argument
1568 if (vcpu_get_cr4(vcpu) & CR4_LA57) { in validate_canonical_addr()
1585 int32_t invvpid_vmexit_handler(struct acrn_vcpu *vcpu) in invvpid_vmexit_handler() argument
1587 uint32_t supported_types = (vcpu_get_guest_msr(vcpu, MSR_IA32_VMX_EPT_VPID_CAP) >> 40U) & 0xfU; in invvpid_vmexit_handler()
1591 if (check_vmx_permission(vcpu)) { in invvpid_vmexit_handler()
1592 type = get_invvpid_ept_operands(vcpu, (void *)&desc, sizeof(desc)); in invvpid_vmexit_handler()
1601 } else if ((type == VMX_VPID_TYPE_INDIVIDUAL_ADDR) && !validate_canonical_addr(vcpu, desc.gva)) { in invvpid_vmexit_handler()
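
invvpid_vmexit_handler() derives the supported INVVPID types from bits 43:40 of the virtual IA32_VMX_EPT_VPID_CAP MSR, fetches the descriptor via get_invvpid_ept_operands(), and fails the instruction for an unsupported type or, for the individual-address type, a non-canonical address. A standalone sketch of those two checks, assuming 48-bit linear addresses (the CR4.LA57 case at line 1568 is omitted):

/* Sketch of INVVPID operand validation: the requested type must be among
 * the advertised types, and the individual-address type needs a canonical
 * guest-virtual address (48-bit case shown). */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VPID_TYPE_INDIVIDUAL_ADDR  0ULL

static bool is_canonical_48(uint64_t va)
{
	uint64_t top = va >> 47U;   /* bits 63:47 must be all zeros or all ones */
	return (top == 0ULL) || (top == 0x1ffffULL);
}

static bool invvpid_operands_ok(uint64_t type, uint32_t supported_types, uint64_t gva)
{
	if ((type > 3ULL) || ((supported_types & (1U << type)) == 0U)) {
		return false;   /* unsupported invalidation type -> VMfail     */
	}
	if ((type == VPID_TYPE_INDIVIDUAL_ADDR) && !is_canonical_48(gva)) {
		return false;   /* non-canonical address for per-address flush */
	}
	return true;
}

int main(void)
{
	printf("%d\n", invvpid_operands_ok(0ULL, 0xfU, 0x00007fffffffffffULL));  /* 1 */
	printf("%d\n", invvpid_operands_ok(0ULL, 0xfU, 0x0000800000000000ULL));  /* 0 */
	return 0;
}
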