Lines matching refs: svm
66 static void avic_activate_vmcb(struct vcpu_svm *svm) in avic_activate_vmcb() argument
68 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_activate_vmcb()
82 if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) { in avic_activate_vmcb()
86 svm_set_x2apic_msr_interception(svm, false); in avic_activate_vmcb()
92 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu); in avic_activate_vmcb()
97 svm_set_x2apic_msr_interception(svm, true); in avic_activate_vmcb()
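
Note on 66-97: avic_activate_vmcb() enables AVIC in vmcb01 and, when x2AVIC is supported and the guest APIC is in x2APIC mode, stops intercepting the x2APIC MSR range (86) and requests a TLB flush of the current context (92); otherwise MSR interception stays on (97). A minimal userspace sketch of the int_ctl toggling, with stand-in bit positions assumed to mirror the kernel's AVIC_ENABLE_MASK and X2APIC_MODE_MASK:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in bit positions for VMCB int_ctl; assumed to mirror the kernel's
 * AVIC_ENABLE_MASK (bit 31) and X2APIC_MODE_MASK (bit 30). */
#define AVIC_ENABLE_BIT  (1u << 31)
#define X2APIC_MODE_BIT  (1u << 30)

/* Activate path: always enable AVIC; select x2AVIC only when the host
 * supports it and the guest APIC is already in x2APIC mode. */
static uint32_t avic_activate_int_ctl(uint32_t int_ctl, bool x2avic_enabled,
                                      bool guest_x2apic_mode)
{
    int_ctl |= AVIC_ENABLE_BIT;
    if (x2avic_enabled && guest_x2apic_mode)
        int_ctl |= X2APIC_MODE_BIT;     /* MSR-based APIC access */
    else
        int_ctl &= ~X2APIC_MODE_BIT;    /* MMIO-based access */
    return int_ctl;
}

int main(void)
{
    printf("int_ctl = %#x\n", avic_activate_int_ctl(0, true, true));
    return 0;
}
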
101 static void avic_deactivate_vmcb(struct vcpu_svm *svm) in avic_deactivate_vmcb() argument
103 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_deactivate_vmcb()
112 if (is_guest_mode(&svm->vcpu) && in avic_deactivate_vmcb()
113 vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)) in avic_deactivate_vmcb()
117 svm_set_x2apic_msr_interception(svm, true); in avic_deactivate_vmcb()
225 void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb) in avic_init_vmcb() argument
227 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
228 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
237 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
238 avic_activate_vmcb(svm); in avic_init_vmcb()
240 avic_deactivate_vmcb(svm); in avic_init_vmcb()
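
Note on 225-240: avic_init_vmcb() points the VMCB at the vCPU's AVIC backing page, with __sme_set() tagging the physical address with the SME C-bit when memory encryption is in use, then activates or deactivates AVIC according to kvm_apicv_activated(). A standalone illustration of the address tagging; the mask bit below is made up, since the real sme_me_mask comes from CPUID:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's sme_me_mask; the C-bit position is
 * CPU-specific, bit 47 is used here purely for illustration. */
static uint64_t sme_me_mask = 1ULL << 47;

/* Mirrors the spirit of __sme_set(): OR the memory-encryption bit into a
 * physical address so hardware treats the page as encrypted. */
static inline uint64_t sme_set(uint64_t paddr)
{
    return paddr | sme_me_mask;
}

int main(void)
{
    uint64_t backing_page_pa = 0x123456000ULL;  /* example page-aligned PA */
    printf("bpa = %#llx\n", (unsigned long long)sme_set(backing_page_pa));
    return 0;
}
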
262 struct vcpu_svm *svm = to_svm(vcpu); in avic_init_backing_page() local
285 svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs); in avic_init_backing_page()
292 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
297 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
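
Note on 262-297: avic_init_backing_page() reuses the vCPU's APIC register page as the AVIC backing page (285), builds the physical APIC ID table entry from its physical address (292, __sme_set() omitted here), and caches a pointer to that entry in avic_physical_id_cache (297). A sketch of the entry encoding, with the backing-page and valid masks assumed to follow the AVIC_PHYSICAL_ID_ENTRY_* layout (address in bits 51:12, valid in bit 63):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of an AVIC physical APIC ID table entry: backing page
 * physical address in bits 51:12, valid flag in bit 63. */
#define BACKING_PAGE_MASK  0x000FFFFFFFFFF000ULL
#define ENTRY_VALID_MASK   (1ULL << 63)

/* Build the initial table entry for a vCPU's backing page, in the spirit
 * of line 292, before the entry's address is cached per vCPU. */
static uint64_t make_physical_id_entry(uint64_t backing_page_pa)
{
    return (backing_page_pa & BACKING_PAGE_MASK) | ENTRY_VALID_MASK;
}

int main(void)
{
    printf("entry = %#llx\n",
           (unsigned long long)make_physical_id_entry(0x123456000ULL));
    return 0;
}
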
473 struct vcpu_svm *svm = to_svm(vcpu); in avic_incomplete_ipi_interception() local
474 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
475 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
476 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
477 u32 index = svm->vmcb->control.exit_info_2 & 0x1FF; in avic_incomplete_ipi_interception()
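
Note on 473-477: for the AVIC incomplete-IPI exit, hardware packs the guest's ICR into exit_info_1 (high half in the upper 32 bits) and the source APIC ID plus physical ID table index into exit_info_2. A self-contained sketch of that unpacking, reusing the 9-bit index mask from line 477:

#include <stdint.h>
#include <stdio.h>

/* Decode the two 64-bit exit-info fields the way lines 474-477 do. */
static void decode_incomplete_ipi(uint64_t exit_info_1, uint64_t exit_info_2)
{
    uint32_t icrh  = exit_info_1 >> 32;        /* ICR high (destination) */
    uint32_t icrl  = (uint32_t)exit_info_1;    /* ICR low (vector, mode) */
    uint32_t id    = exit_info_2 >> 32;        /* source APIC ID */
    uint32_t index = exit_info_2 & 0x1FF;      /* physical ID table index */

    printf("icrh=%#x icrl=%#x id=%u index=%u\n", icrh, icrl, id, index);
}

int main(void)
{
    decode_incomplete_ipi(0x000000C8000000FDULL, 0x0000000300000003ULL);
    return 0;
}
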
575 struct vcpu_svm *svm = to_svm(vcpu); in avic_invalidate_logical_id_entry() local
576 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
583 entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
590 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_ldr_update() local
598 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
603 svm->ldr_reg = ldr; in avic_handle_ldr_update()
609 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_dfr_update() local
612 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
616 svm->dfr_reg = dfr; in avic_handle_dfr_update()
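
Note on 575-616: avic_handle_ldr_update() and avic_handle_dfr_update() cache the guest's LDR and DFR in the vcpu_svm and skip the logical ID table invalidation when the value is unchanged (598, 612), while line 576 distinguishes flat from cluster delivery. A small sketch of that compare-and-cache step, assuming the architectural APIC_DFR_FLAT encoding of 0xFFFFFFFF:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_DFR_FLAT 0xFFFFFFFFu   /* architectural flat DFR encoding */

struct vcpu_state {
    uint32_t ldr_reg;   /* cached logical destination register */
    uint32_t dfr_reg;   /* cached destination format register */
};

/* Mirror the "skip if unchanged" pattern from lines 598 and 612. */
static bool dfr_update(struct vcpu_state *v, uint32_t new_dfr)
{
    if (v->dfr_reg == new_dfr)
        return false;               /* nothing to invalidate */
    v->dfr_reg = new_dfr;
    return true;                    /* caller re-derives the logical ID entry */
}

int main(void)
{
    struct vcpu_state v = { .ldr_reg = 0, .dfr_reg = APIC_DFR_FLAT };
    bool changed = dfr_update(&v, 0x0FFFFFFFu);
    printf("changed=%d flat=%d\n", changed, v.dfr_reg == APIC_DFR_FLAT);
    return 0;
}
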
673 struct vcpu_svm *svm = to_svm(vcpu); in avic_unaccelerated_access_interception() local
675 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
677 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
679 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
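
Note on 673-679: the unaccelerated-access handler extracts the APIC register offset and the write flag from exit_info_1 and the vector from exit_info_2; the mask names are truncated in the listing, so the widths below (12-bit offset, write flag at bit 32, 32-bit vector) are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field layout for the AVIC unaccelerated-access exit:
 * exit_info_1[11:0]  -> APIC register offset (16-byte aligned)
 * exit_info_1[32]    -> write (1) vs read (0)
 * exit_info_2[31:0]  -> vector, for interrupt-related registers */
#define ACCESS_OFFSET_MASK 0xFF0u
#define ACCESS_WRITE_BIT   1u
#define ACCESS_VECTOR_MASK 0xFFFFFFFFu

static void decode_noaccel(uint64_t exit_info_1, uint64_t exit_info_2)
{
    uint32_t offset = exit_info_1 & ACCESS_OFFSET_MASK;
    uint32_t vector = exit_info_2 & ACCESS_VECTOR_MASK;
    bool write = (exit_info_1 >> 32) & ACCESS_WRITE_BIT;

    printf("offset=%#x vector=%u write=%d\n", offset, vector, write);
}

int main(void)
{
    decode_noaccel((1ULL << 32) | 0x300, 0x31);  /* example: write to ICR */
    return 0;
}
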
697 int avic_init_vcpu(struct vcpu_svm *svm) in avic_init_vcpu() argument
700 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
709 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
710 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
711 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
727 struct vcpu_svm *svm = to_svm(vcpu); in avic_set_pi_irte_mode() local
736 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
738 if (list_empty(&svm->ir_list)) in avic_set_pi_irte_mode()
741 list_for_each_entry(ir, &svm->ir_list, node) { in avic_set_pi_irte_mode()
750 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
754 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_del() argument
759 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
760 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
767 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
770 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_add() argument
782 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
807 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
808 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
809 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
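
Note on 754-809: svm_ir_list_add() and svm_ir_list_del() maintain a per-vCPU list of IOMMU interrupt-remapping entries, always taking ir_list_lock with spin_lock_irqsave() around list walks and updates. A userspace analogy of the pattern, with a pthread mutex standing in for the spinlock; the types and helpers below are illustrative, not the kernel's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the per-vCPU list of posted-interrupt
 * remapping entries (svm->ir_list in the listing). */
struct ir_entry {
    void *irte;                 /* opaque remapping-table entry cookie */
    struct ir_entry *next;
};

struct vcpu_pi {
    struct ir_entry *head;
    pthread_mutex_t lock;       /* plays the role of svm->ir_list_lock */
};

static void ir_list_add(struct vcpu_pi *v, void *irte)
{
    struct ir_entry *e = malloc(sizeof(*e));
    e->irte = irte;
    pthread_mutex_lock(&v->lock);       /* kernel: spin_lock_irqsave() */
    e->next = v->head;
    v->head = e;
    pthread_mutex_unlock(&v->lock);
}

static void ir_list_del(struct vcpu_pi *v, void *irte)
{
    pthread_mutex_lock(&v->lock);
    for (struct ir_entry **pp = &v->head; *pp; pp = &(*pp)->next) {
        if ((*pp)->irte == irte) {      /* match by cookie, as line 760 does */
            struct ir_entry *dead = *pp;
            *pp = dead->next;
            free(dead);
            break;
        }
    }
    pthread_mutex_unlock(&v->lock);
}

int main(void)
{
    struct vcpu_pi v = { .head = NULL, .lock = PTHREAD_MUTEX_INITIALIZER };
    int cookie;
    ir_list_add(&v, &cookie);
    ir_list_del(&v, &cookie);
    printf("list empty: %d\n", v.head == NULL);
    return 0;
}
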
827 struct vcpu_data *vcpu_info, struct vcpu_svm **svm) in get_pi_vcpu_info() argument
843 *svm = to_svm(vcpu); in get_pi_vcpu_info()
844 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
885 struct vcpu_svm *svm = NULL; in avic_pi_update_irte() local
897 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set && in avic_pi_update_irte()
898 kvm_vcpu_apicv_active(&svm->vcpu)) { in avic_pi_update_irte()
902 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in avic_pi_update_irte()
905 svm->vcpu.vcpu_id); in avic_pi_update_irte()
918 svm_ir_list_add(svm, &pi); in avic_pi_update_irte()
948 if (!ret && svm) { in avic_pi_update_irte()
949 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in avic_pi_update_irte()
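
Note on 885-949: avic_pi_update_irte() programs a posted-interrupt IRTE only when the target vCPU has APICv active (897-898), pointing it at the vCPU's backing page (902) and tagging it with a value built from the vcpu_id (905) so later affinity updates can find the right vCPU. A sketch of one plausible tag encoding; the 24/8-bit split below is an assumption for illustration and may not match the kernel's AVIC_GATAG layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed split of a 32-bit guest-APIC tag: VM ID in the high bits,
 * vCPU ID in the low bits. The real field widths may differ. */
#define VCPU_ID_BITS 8
#define VCPU_ID_MASK ((1u << VCPU_ID_BITS) - 1)

static uint32_t make_ga_tag(uint32_t vm_id, uint32_t vcpu_id)
{
    return (vm_id << VCPU_ID_BITS) | (vcpu_id & VCPU_ID_MASK);
}

int main(void)
{
    uint32_t tag = make_ga_tag(0x42, 3);
    printf("tag=%#x vm=%#x vcpu=%u\n", tag, tag >> VCPU_ID_BITS,
           tag & VCPU_ID_MASK);
    return 0;
}
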
972 struct vcpu_svm *svm = to_svm(vcpu); in avic_update_iommu_vcpu_affinity() local
981 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
983 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
986 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
992 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
1000 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_load() local
1017 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
1024 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
1031 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_put() local
1035 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
1044 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
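
Note on 1000-1044: avic_vcpu_load() and avic_vcpu_put() rewrite the cached physical ID table entry with READ_ONCE()/WRITE_ONCE() so concurrent readers always see a consistent 64-bit value: load records the host CPU and sets the is-running flag, put clears it. A sketch of that read-modify-write, with field positions (host physical APIC ID in bits 7:0, is-running in bit 62) assumed as stand-ins for the AVIC_PHYSICAL_ID_ENTRY_* masks:

#include <stdint.h>
#include <stdio.h>

/* Assumed physical ID entry fields: host physical APIC ID in bits 7:0,
 * is-running flag in bit 62. */
#define HOST_PHYSICAL_ID_MASK 0xFFull
#define IS_RUNNING_MASK       (1ull << 62)

/* vCPU scheduled in on a physical CPU: record where it runs and mark it
 * running, as avic_vcpu_load() does on the cached entry. */
static void vcpu_load(volatile uint64_t *entry, uint64_t h_physical_id)
{
    uint64_t e = *entry;                        /* kernel: READ_ONCE() */
    e &= ~HOST_PHYSICAL_ID_MASK;
    e |= h_physical_id & HOST_PHYSICAL_ID_MASK;
    e |= IS_RUNNING_MASK;
    *entry = e;                                 /* kernel: WRITE_ONCE() */
}

/* vCPU scheduled out: clear the is-running flag, as avic_vcpu_put() does. */
static void vcpu_put(volatile uint64_t *entry)
{
    uint64_t e = *entry;
    e &= ~IS_RUNNING_MASK;
    *entry = e;
}

int main(void)
{
    uint64_t entry = 0x123456000ull | (1ull << 63);  /* valid entry */
    vcpu_load(&entry, 5);
    printf("loaded: %#llx\n", (unsigned long long)entry);
    vcpu_put(&entry);
    printf("put:    %#llx\n", (unsigned long long)entry);
    return 0;
}
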
1049 struct vcpu_svm *svm = to_svm(vcpu); in avic_refresh_virtual_apic_mode() local
1050 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_refresh_virtual_apic_mode()
1064 avic_activate_vmcb(svm); in avic_refresh_virtual_apic_mode()
1066 avic_deactivate_vmcb(svm); in avic_refresh_virtual_apic_mode()