Lines matching references to svm in arch/x86/kvm/svm/sev.c (each entry: source line number, matching line, enclosing function):
565 static int sev_es_sync_vmsa(struct vcpu_svm *svm) in sev_es_sync_vmsa() argument
567 struct sev_es_save_area *save = svm->sev_es.vmsa; in sev_es_sync_vmsa()
570 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
579 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
582 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; in sev_es_sync_vmsa()
583 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; in sev_es_sync_vmsa()
584 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in sev_es_sync_vmsa()
585 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; in sev_es_sync_vmsa()
586 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; in sev_es_sync_vmsa()
587 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; in sev_es_sync_vmsa()
588 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; in sev_es_sync_vmsa()
589 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; in sev_es_sync_vmsa()
591 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; in sev_es_sync_vmsa()
592 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; in sev_es_sync_vmsa()
593 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; in sev_es_sync_vmsa()
594 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; in sev_es_sync_vmsa()
595 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; in sev_es_sync_vmsa()
596 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; in sev_es_sync_vmsa()
597 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; in sev_es_sync_vmsa()
598 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; in sev_es_sync_vmsa()
600 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; in sev_es_sync_vmsa()
603 save->xcr0 = svm->vcpu.arch.xcr0; in sev_es_sync_vmsa()
604 save->pkru = svm->vcpu.arch.pkru; in sev_es_sync_vmsa()
605 save->xss = svm->vcpu.arch.ia32_xss; in sev_es_sync_vmsa()
606 save->dr6 = svm->vcpu.arch.dr6; in sev_es_sync_vmsa()
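
The sev_es_sync_vmsa() fragments above show the sync pattern: bulk-copy the VMCB save area into the VMSA, then overlay the general-purpose registers, which KVM tracks in the vCPU's register array rather than in the VMCB. A minimal standalone sketch of that pattern follows; the types and names are simplified stand-ins, not the kernel's definitions.

/*
 * Sketch of the sync pattern above: bulk-copy a base save area, then
 * overlay the registers tracked outside of it. Simplified stand-in
 * types; not the kernel's struct sev_es_save_area.
 */
#include <stdint.h>
#include <string.h>

enum { REG_RAX, REG_RBX, REG_RCX, REG_RDX, NR_REGS };

struct save_area {
	uint64_t rax, rbx, rcx, rdx;
	/* ... segment state, control registers, etc. ... */
};

static void sync_save_area(struct save_area *save,
			   const struct save_area *vmcb_save,
			   const uint64_t regs[NR_REGS])
{
	/* Start from the VMCB's view of architectural state... */
	memcpy(save, vmcb_save, sizeof(*save));

	/* ...then overlay the GPRs, which live in the vCPU register array. */
	save->rax = regs[REG_RAX];
	save->rbx = regs[REG_RBX];
	save->rcx = regs[REG_RCX];
	save->rdx = regs[REG_RDX];
}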
618 struct vcpu_svm *svm = to_svm(vcpu); in __sev_launch_update_vmsa() local
622 ret = sev_es_sync_vmsa(svm); in __sev_launch_update_vmsa()
631 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); in __sev_launch_update_vmsa()
635 vmsa.address = __sme_pa(svm->sev_es.vmsa); in __sev_launch_update_vmsa()
2333 struct vcpu_svm *svm; in sev_free_vcpu() local
2338 svm = to_svm(vcpu); in sev_free_vcpu()
2341 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa); in sev_free_vcpu()
2343 __free_page(virt_to_page(svm->sev_es.vmsa)); in sev_free_vcpu()
2345 if (svm->sev_es.ghcb_sa_free) in sev_free_vcpu()
2346 kvfree(svm->sev_es.ghcb_sa); in sev_free_vcpu()
2349 static void dump_ghcb(struct vcpu_svm *svm) in dump_ghcb() argument
2351 struct ghcb *ghcb = svm->sev_es.ghcb; in dump_ghcb()
2362 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
2374 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) in sev_es_sync_to_ghcb() argument
2376 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_to_ghcb()
2377 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_to_ghcb()
2393 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) in sev_es_sync_from_ghcb() argument
2395 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
2396 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_from_ghcb()
2397 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_from_ghcb()
2420 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); in sev_es_sync_from_ghcb()
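
The ghcb_get_cpl_if_valid() call above reflects the GHCB protocol's valid bitmap: the guest marks which save-area fields it populated, and the hypervisor consumes only fields whose bit is set. A simplified sketch of that accessor pattern follows; the struct layout and the bit position are illustrative assumptions, not the real GHCB layout.

/*
 * Minimal sketch of the GHCB "valid bitmap" accessor pattern implied by
 * ghcb_get_cpl_if_valid(): each field has a bit in a bitmap, set by the
 * writer and checked by the reader. Offsets here are illustrative only.
 */
#include <stdint.h>
#include <stdbool.h>

struct ghcb_sketch {
	uint64_t cpl;
	/* ... other save-area fields ... */
	uint8_t valid_bitmap[16];
};

#define GHCB_CPL_BIT 25 /* hypothetical bit position for the CPL field */

static bool ghcb_field_valid(const struct ghcb_sketch *g, unsigned int bit)
{
	return g->valid_bitmap[bit / 8] & (1u << (bit % 8));
}

static void ghcb_set_cpl(struct ghcb_sketch *g, uint64_t cpl)
{
	g->cpl = cpl;
	g->valid_bitmap[GHCB_CPL_BIT / 8] |= 1u << (GHCB_CPL_BIT % 8);
}

static uint64_t ghcb_get_cpl_if_valid_sketch(const struct ghcb_sketch *g)
{
	/* Consume the field only if the guest marked it valid. */
	return ghcb_field_valid(g, GHCB_CPL_BIT) ? g->cpl : 0;
}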
2438 static int sev_es_validate_vmgexit(struct vcpu_svm *svm) in sev_es_validate_vmgexit() argument
2445 ghcb = svm->sev_es.ghcb; in sev_es_validate_vmgexit()
2546 vcpu = &svm->vcpu; in sev_es_validate_vmgexit()
2557 dump_ghcb(svm); in sev_es_validate_vmgexit()
2570 void sev_es_unmap_ghcb(struct vcpu_svm *svm) in sev_es_unmap_ghcb() argument
2572 if (!svm->sev_es.ghcb) in sev_es_unmap_ghcb()
2575 if (svm->sev_es.ghcb_sa_free) { in sev_es_unmap_ghcb()
2581 if (svm->sev_es.ghcb_sa_sync) { in sev_es_unmap_ghcb()
2582 kvm_write_guest(svm->vcpu.kvm, in sev_es_unmap_ghcb()
2583 ghcb_get_sw_scratch(svm->sev_es.ghcb), in sev_es_unmap_ghcb()
2584 svm->sev_es.ghcb_sa, in sev_es_unmap_ghcb()
2585 svm->sev_es.ghcb_sa_len); in sev_es_unmap_ghcb()
2586 svm->sev_es.ghcb_sa_sync = false; in sev_es_unmap_ghcb()
2589 kvfree(svm->sev_es.ghcb_sa); in sev_es_unmap_ghcb()
2590 svm->sev_es.ghcb_sa = NULL; in sev_es_unmap_ghcb()
2591 svm->sev_es.ghcb_sa_free = false; in sev_es_unmap_ghcb()
2594 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); in sev_es_unmap_ghcb()
2596 sev_es_sync_to_ghcb(svm); in sev_es_unmap_ghcb()
2598 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true); in sev_es_unmap_ghcb()
2599 svm->sev_es.ghcb = NULL; in sev_es_unmap_ghcb()
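
The sev_es_unmap_ghcb() fragments show the teardown order for a host-allocated scratch buffer: write it back to the guest if ghcb_sa_sync is set, then free it and clear the bookkeeping flags before unmapping the GHCB. A hedged standalone sketch of that sequence follows; the struct and the write_back callback (standing in for kvm_write_guest()) are hypothetical.

/*
 * Sketch of the unmap-time scratch handling above: optionally write the
 * host copy back to the guest, then free it and reset the flags.
 */
#include <stdbool.h>
#include <stdlib.h>

struct scratch_state {
	void *sa;      /* host copy of the guest scratch area */
	size_t sa_len;
	bool sa_sync;  /* write back to the guest before freeing? */
	bool sa_free;  /* was sa allocated by the host? */
};

/* write_back() stands in for kvm_write_guest(); assumed helper. */
static void release_scratch(struct scratch_state *s,
			    void (*write_back)(const void *, size_t))
{
	if (!s->sa_free)
		return;

	if (s->sa_sync) {
		write_back(s->sa, s->sa_len);
		s->sa_sync = false;
	}

	free(s->sa);
	s->sa = NULL;
	s->sa_free = false;
}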
2602 void pre_sev_run(struct vcpu_svm *svm, int cpu) in pre_sev_run() argument
2605 int asid = sev_get_asid(svm->vcpu.kvm); in pre_sev_run()
2608 svm->asid = asid; in pre_sev_run()
2616 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
2617 svm->vcpu.arch.last_vmentry_cpu == cpu) in pre_sev_run()
2620 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
2621 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
2622 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
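
pre_sev_run() implements a flush-avoidance check: the ASID flush is skipped only when this physical CPU last entered the same VMCB under this ASID and the vCPU's last VM-entry was on this CPU; otherwise the per-CPU table is updated and TLB_CONTROL_FLUSH_ASID is requested. A simplified sketch of that check, with stand-in structures for the kernel's per-CPU SEV data:

/*
 * Sketch of the per-CPU, per-ASID flush-avoidance check above. If this
 * CPU last entered the same VMCB under this ASID, its TLB entries are
 * still valid; otherwise record the new owner and request a flush.
 */
#include <stdbool.h>

#define MAX_ASIDS 512 /* illustrative; the real limit is CPUID-reported */

struct vmcb_sketch { bool flush_asid; };

struct cpu_sev_data {
	struct vmcb_sketch *sev_vmcbs[MAX_ASIDS];
};

static void pre_run_check(struct cpu_sev_data *sd, int cpu, int asid,
			  struct vmcb_sketch *vmcb, int last_entry_cpu)
{
	/* Same VMCB, same ASID, same CPU as last entry: no flush needed. */
	if (sd->sev_vmcbs[asid] == vmcb && last_entry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = vmcb;
	vmcb->flush_asid = true; /* stands in for TLB_CONTROL_FLUSH_ASID */
}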
2626 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) in setup_vmgexit_scratch() argument
2628 struct vmcb_control_area *control = &svm->vmcb->control; in setup_vmgexit_scratch()
2629 struct ghcb *ghcb = svm->sev_es.ghcb; in setup_vmgexit_scratch()
2665 scratch_va = (void *)svm->sev_es.ghcb; in setup_vmgexit_scratch()
2681 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { in setup_vmgexit_scratch()
2695 svm->sev_es.ghcb_sa_sync = sync; in setup_vmgexit_scratch()
2696 svm->sev_es.ghcb_sa_free = true; in setup_vmgexit_scratch()
2699 svm->sev_es.ghcb_sa = scratch_va; in setup_vmgexit_scratch()
2700 svm->sev_es.ghcb_sa_len = len; in setup_vmgexit_scratch()
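
setup_vmgexit_scratch() either uses the shared buffer inside the GHCB page itself (the scratch_va = svm->sev_es.ghcb case) or allocates a host buffer and fills it with kvm_read_guest(); ghcb_sa_sync and ghcb_sa_free record whether that buffer must be written back and freed at unmap time. A guest-supplied scratch region has to be validated before use; the sketch below shows the kind of overflow-safe range check involved, with illustrative bounds rather than the real GHCB layout.

/*
 * Sketch of scratch-range validation: the guest supplies a scratch GPA
 * and length, and the host checks, without integer overflow, that the
 * region fits inside the shared part of the GHCB page before using it
 * in place. Offsets here are assumptions for illustration.
 */
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SIZE_SK   4096u
#define SHARED_BUF_OFF 2048u /* hypothetical offset of the shared buffer */

static bool scratch_in_ghcb_page(uint64_t ghcb_gpa, uint64_t scratch_gpa,
				 uint64_t len)
{
	uint64_t start = ghcb_gpa + SHARED_BUF_OFF;
	uint64_t end = ghcb_gpa + PAGE_SIZE_SK;

	/* Reject empty or wrapping ranges before comparing bounds. */
	if (len == 0 || scratch_gpa + len < scratch_gpa)
		return false;

	return scratch_gpa >= start && scratch_gpa + len <= end;
}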
2711 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, in set_ghcb_msr_bits() argument
2714 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); in set_ghcb_msr_bits()
2715 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; in set_ghcb_msr_bits()
2718 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos) in get_ghcb_msr_bits() argument
2720 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; in get_ghcb_msr_bits()
2723 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value) in set_ghcb_msr() argument
2725 svm->vmcb->control.ghcb_gpa = value; in set_ghcb_msr()
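
The three helpers above are shown in full: the GHCB MSR is a packed 64-bit value, and a field is updated by clearing its mask at its position, then OR-ing in the new masked value; reads shift and mask. A standalone version, directly mirroring the fragments with plain stdint types:

/*
 * Standalone version of the GHCB MSR bit helpers above, operating on a
 * caller-owned u64 instead of vmcb->control.ghcb_gpa.
 */
#include <stdint.h>

static void set_ghcb_msr_bits(uint64_t *ghcb_gpa, uint64_t value,
			      uint64_t mask, unsigned int pos)
{
	*ghcb_gpa &= ~(mask << pos);        /* clear the field */
	*ghcb_gpa |= (value & mask) << pos; /* insert the new value */
}

static uint64_t get_ghcb_msr_bits(uint64_t ghcb_gpa, uint64_t mask,
				  unsigned int pos)
{
	return (ghcb_gpa >> pos) & mask;
}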
2728 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) in sev_handle_vmgexit_msr_protocol() argument
2730 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit_msr_protocol()
2731 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_handle_vmgexit_msr_protocol()
2737 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
2742 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, in sev_handle_vmgexit_msr_protocol()
2749 cpuid_fn = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2763 cpuid_reg = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2775 set_ghcb_msr_bits(svm, cpuid_value, in sev_handle_vmgexit_msr_protocol()
2779 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, in sev_handle_vmgexit_msr_protocol()
2787 reason_set = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2790 reason_code = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2808 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
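
In the CPUID leg of the MSR protocol above, the guest encodes the requested leaf and target register in the GHCB MSR, and the host replies by packing the 32-bit result plus the CPUID response code back into the same MSR. A sketch of the response side follows, reusing the bit-helper pattern; the field positions (result in bits 63:32, response code in bits 11:0) and the response code value follow the GHCB spec layout but should be treated as assumptions here.

#include <stdint.h>

/* Same mask/shift helper as in the previous sketch. */
static void set_ghcb_msr_bits(uint64_t *ghcb_gpa, uint64_t value,
			      uint64_t mask, unsigned int pos)
{
	*ghcb_gpa &= ~(mask << pos);
	*ghcb_gpa |= (value & mask) << pos;
}

#define MSR_CPUID_VALUE_POS  32
#define MSR_CPUID_VALUE_MASK 0xffffffffull
#define MSR_INFO_POS         0
#define MSR_INFO_MASK        0xfffull
#define MSR_CPUID_RESP       0x005ull /* response code value assumed */

/* Pack a 32-bit CPUID result and the response code into the GHCB MSR. */
static void cpuid_msr_response(uint64_t *ghcb_gpa, uint32_t cpuid_value)
{
	set_ghcb_msr_bits(ghcb_gpa, cpuid_value,
			  MSR_CPUID_VALUE_MASK, MSR_CPUID_VALUE_POS);
	set_ghcb_msr_bits(ghcb_gpa, MSR_CPUID_RESP,
			  MSR_INFO_MASK, MSR_INFO_POS);
}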
2816 struct vcpu_svm *svm = to_svm(vcpu); in sev_handle_vmgexit() local
2817 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit()
2825 return sev_handle_vmgexit_msr_protocol(svm); in sev_handle_vmgexit()
2834 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { in sev_handle_vmgexit()
2843 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
2844 ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
2850 ret = sev_es_validate_vmgexit(svm); in sev_handle_vmgexit()
2854 sev_es_sync_from_ghcb(svm); in sev_handle_vmgexit()
2860 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
2867 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
2870 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); in sev_handle_vmgexit()
2877 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
2920 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) in sev_es_string_io() argument
2926 if (svm->vmcb->control.exit_info_2 > INT_MAX) in sev_es_string_io()
2929 count = svm->vmcb->control.exit_info_2; in sev_es_string_io()
2933 r = setup_vmgexit_scratch(svm, in, bytes); in sev_es_string_io()
2937 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, in sev_es_string_io()
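
sev_es_string_io() guards a guest-controlled value twice: the repeat count from exit_info_2 is capped at INT_MAX before the byte count (count times the element size) is computed, and the multiplication itself must be overflow-checked before anything is allocated. A sketch of that guarded computation, using the compiler's overflow builtin as a stand-in for the kernel's check:

/*
 * Sketch of the guarded size computation behind the fragments above: a
 * guest-controlled repeat count is range-checked, then count * size is
 * computed with explicit overflow detection before any allocation.
 */
#include <stdint.h>
#include <limits.h>
#include <errno.h>

static int string_io_bytes(uint64_t exit_info_2, unsigned int size,
			   unsigned int *bytes)
{
	unsigned int count;

	/* exit_info_2 holds the repeat count; refuse absurd values. */
	if (exit_info_2 > INT_MAX)
		return -EINVAL;

	count = (unsigned int)exit_info_2;

	/* count * size must not overflow the byte count. */
	if (__builtin_mul_overflow(count, size, bytes))
		return -EINVAL;

	return 0;
}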
2941 static void sev_es_init_vmcb(struct vcpu_svm *svm) in sev_es_init_vmcb() argument
2943 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_init_vmcb()
2945 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; in sev_es_init_vmcb()
2946 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in sev_es_init_vmcb()
2953 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); in sev_es_init_vmcb()
2956 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in sev_es_init_vmcb()
2957 svm_clr_intercept(svm, INTERCEPT_CR4_READ); in sev_es_init_vmcb()
2958 svm_clr_intercept(svm, INTERCEPT_CR8_READ); in sev_es_init_vmcb()
2959 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in sev_es_init_vmcb()
2960 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE); in sev_es_init_vmcb()
2961 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in sev_es_init_vmcb()
2963 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0); in sev_es_init_vmcb()
2966 svm_set_intercept(svm, TRAP_EFER_WRITE); in sev_es_init_vmcb()
2967 svm_set_intercept(svm, TRAP_CR0_WRITE); in sev_es_init_vmcb()
2968 svm_set_intercept(svm, TRAP_CR4_WRITE); in sev_es_init_vmcb()
2969 svm_set_intercept(svm, TRAP_CR8_WRITE); in sev_es_init_vmcb()
2972 clr_exception_intercept(svm, GP_VECTOR); in sev_es_init_vmcb()
2975 svm_clr_intercept(svm, INTERCEPT_XSETBV); in sev_es_init_vmcb()
2978 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); in sev_es_init_vmcb()
2979 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); in sev_es_init_vmcb()
2980 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in sev_es_init_vmcb()
2981 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in sev_es_init_vmcb()
2982 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in sev_es_init_vmcb()
2983 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in sev_es_init_vmcb()
2986 (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) || in sev_es_init_vmcb()
2987 guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) { in sev_es_init_vmcb()
2988 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1); in sev_es_init_vmcb()
2989 if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP)) in sev_es_init_vmcb()
2990 svm_clr_intercept(svm, INTERCEPT_RDTSCP); in sev_es_init_vmcb()
2994 void sev_init_vmcb(struct vcpu_svm *svm) in sev_init_vmcb() argument
2996 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in sev_init_vmcb()
2997 clr_exception_intercept(svm, UD_VECTOR); in sev_init_vmcb()
2999 if (sev_es_guest(svm->vcpu.kvm)) in sev_init_vmcb()
3000 sev_es_init_vmcb(svm); in sev_init_vmcb()
3003 void sev_es_vcpu_reset(struct vcpu_svm *svm) in sev_es_vcpu_reset() argument
3009 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, in sev_es_vcpu_reset()
3035 struct vcpu_svm *svm = to_svm(vcpu); in sev_vcpu_deliver_sipi_vector() local
3038 if (!svm->sev_es.received_first_sipi) { in sev_vcpu_deliver_sipi_vector()
3039 svm->sev_es.received_first_sipi = true; in sev_vcpu_deliver_sipi_vector()
3048 if (!svm->sev_es.ghcb) in sev_vcpu_deliver_sipi_vector()
3051 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); in sev_vcpu_deliver_sipi_vector()