Lines Matching refs:hv_vcpu
(each entry gives the source line number, the matching source line, the enclosing function, and whether hv_vcpu is a local variable or a function argument there)

223 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_notify_acked_sint() local
230 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
231 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
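
Every "local" entry in this listing comes from the same opening idiom: the function first fetches the per-vCPU Hyper-V context with to_hv_vcpu(). For context, that accessor is a thin lookup of vcpu->arch.hyperv (the field assigned at line 970 below); a minimal sketch following the arch/x86/kvm/hyperv.h definition. It returns NULL until kvm_hv_vcpu_init() has run, which is why several functions below bail out on !hv_vcpu:

        static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
        {
                return vcpu->arch.hyperv;       /* NULL until kvm_hv_vcpu_init() */
        }
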
248 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in synic_exit() local
250 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
251 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
252 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
253 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
254 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
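
synic_exit() copies the guest's SynIC MSR write into the vcpu's exit record so it reaches userspace as a KVM_EXIT_HYPERV exit of type KVM_EXIT_HYPERV_SYNIC (syndbg_exit() just below mirrors this for KVM_EXIT_HYPERV_SYNDBG). A hedged userspace sketch of consuming the SynIC variant, using the uapi struct kvm_hyperv_exit from <linux/kvm.h>; "run" is the mmap'ed struct kvm_run:

        /* after KVM_RUN returns */
        if (run->exit_reason == KVM_EXIT_HYPERV &&
            run->hyperv.type == KVM_EXIT_HYPERV_SYNIC) {
                __u32 msr      = run->hyperv.u.synic.msr;
                __u64 control  = run->hyperv.u.synic.control;
                __u64 evt_page = run->hyperv.u.synic.evt_page;
                __u64 msg_page = run->hyperv.u.synic.msg_page;
                /* mirror the new SynIC configuration into the userspace device model */
        }
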
330 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_is_syndbg_enabled() local
332 return hv_vcpu->cpuid_cache.syndbg_cap_eax & in kvm_hv_is_syndbg_enabled()
349 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in syndbg_exit() local
351 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
352 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
353 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
354 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
355 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
356 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
690 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in stimer_set_config() local
696 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && in stimer_set_config()
697 !(hv_vcpu->cpuid_cache.features_edx & in stimer_set_config()
863 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_process_stimers() local
868 if (!hv_vcpu) in kvm_hv_process_stimers()
871 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
872 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
873 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
896 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_vcpu_uninit() local
899 if (!hv_vcpu) in kvm_hv_vcpu_uninit()
902 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
903 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
905 kfree(hv_vcpu); in kvm_hv_vcpu_uninit()
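
Lines 690-905 together cover the synthetic-timer lifecycle: stimer_set_config() validates the guest's MSR write against the cached CPUID bits (direct-mode timers are rejected when enforce_cpuid is set and the corresponding features_edx bit is absent), a pending timer sets its bit in stimer_pending_bitmap, kvm_hv_process_stimers() drains that bitmap with test_and_clear_bit() when the KVM_REQ_HV_STIMER request is serviced, and kvm_hv_vcpu_uninit() runs stimer_cleanup() on every slot before freeing hv_vcpu.
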
911 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_assist_page_enabled() local
913 if (!hv_vcpu) in kvm_hv_assist_page_enabled()
916 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
924 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_get_assist_page() local
926 if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu)) in kvm_hv_get_assist_page()
930 &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page)); in kvm_hv_get_assist_page()
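
kvm_hv_assist_page_enabled() and kvm_hv_get_assist_page() both key off the enable bit of the VP assist page MSR, whose raw value is cached in hv_vcpu->hv_vapic (lines 1504/1521 below); only then does kvm_read_guest_cached() pull the page into hv_vcpu->vp_assist_page. A hedged guest-side sketch of turning the page on (page_gpa is a hypothetical guest-physical address of a zeroed, page-aligned page; TLFS layout: bit 0 = enable, bits 63:12 = PFN):

        wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE,
               (page_gpa & PAGE_MASK) | HV_X64_MSR_VP_ASSIST_PAGE_ENABLE);
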
960 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_vcpu_init() local
963 if (hv_vcpu) in kvm_hv_vcpu_init()
966 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT); in kvm_hv_vcpu_init()
967 if (!hv_vcpu) in kvm_hv_vcpu_init()
970 vcpu->arch.hyperv = hv_vcpu; in kvm_hv_vcpu_init()
971 hv_vcpu->vcpu = vcpu; in kvm_hv_vcpu_init()
973 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
975 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
976 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
977 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
979 hv_vcpu->vp_index = vcpu->vcpu_idx; in kvm_hv_vcpu_init()
982 INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries); in kvm_hv_vcpu_init()
983 spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock); in kvm_hv_vcpu_init()
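
Read in order, the kvm_hv_vcpu_init() lines above amount to a lazy, first-use allocation of the whole per-vCPU Hyper-V context. A condensed sketch of that flow; the tlb_flush_fifo loop bound (HV_NR_TLB_FLUSH_FIFOS) and the return values are assumptions, the rest mirrors the listed lines:

        static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
        {
                struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
                int i;

                if (hv_vcpu)                            /* already initialized */
                        return 0;

                hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
                if (!hv_vcpu)
                        return -ENOMEM;

                vcpu->arch.hyperv = hv_vcpu;
                hv_vcpu->vcpu = vcpu;

                synic_init(&hv_vcpu->synic);

                bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
                for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                        stimer_init(&hv_vcpu->stimer[i], i);

                hv_vcpu->vp_index = vcpu->vcpu_idx;     /* default VP index */

                for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {   /* assumed bound */
                        INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
                        spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
                }

                return 0;
        }
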
1247 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr) in hv_check_msr_access() argument
1249 if (!hv_vcpu->enforce_cpuid) in hv_check_msr_access()
1255 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1258 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1261 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1264 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1267 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1270 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1278 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1288 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1294 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1299 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1304 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1307 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1311 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1315 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
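
hv_check_msr_access() is one switch over the MSR index: with enforce_cpuid clear everything is allowed, otherwise each case returns the relevant bit of the CPUID feature leaf that kvm_hv_set_cpuid() cached (features_eax for most MSRs, features_edx for the last two cases at 1311/1315). A trimmed sketch of the pattern with two illustrative cases; the HV_* bit names are assumptions based on the Hyper-V TLFS, since the listing truncates the lines before them:

        static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
        {
                if (!hv_vcpu->enforce_cpuid)
                        return true;            /* no enforcement: allow everything */

                switch (msr) {
                case HV_X64_MSR_GUEST_OS_ID:
                case HV_X64_MSR_HYPERCALL:
                        return hv_vcpu->cpuid_cache.features_eax &
                                HV_MSR_HYPERCALL_AVAILABLE;     /* assumed bit name */
                case HV_X64_MSR_VP_RUNTIME:
                        return hv_vcpu->cpuid_cache.features_eax &
                                HV_MSR_VP_RUNTIME_AVAILABLE;    /* assumed bit name */
                /* ... one case block per MSR family ... */
                default:
                        break;
                }

                return false;
        }
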
1469 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_msr() local
1471 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) in kvm_hv_set_msr()
1482 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1491 if (hv_vcpu->vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1496 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1504 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1521 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1538 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
1639 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_get_msr() local
1641 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) in kvm_hv_get_msr()
1646 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1655 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1658 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
1884 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in hv_tlb_flush_enqueue() local
1887 if (!hv_vcpu) in hv_tlb_flush_enqueue()
1915 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_vcpu_flush_tlb() local
1920 if (!tdp_enabled || !hv_vcpu) in kvm_hv_vcpu_flush_tlb()
1952 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_flush_tlb() local
1953 u64 *sparse_banks = hv_vcpu->sparse_banks; in kvm_hv_flush_tlb()
2122 if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id) in kvm_hv_flush_tlb()
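
For the flush path: hv_tlb_flush_enqueue() pushes the requested GVA ranges into the per-vCPU tlb_flush_fifo kfifo initialised at lines 982-983 (serialised by write_lock), kvm_hv_vcpu_flush_tlb() drains it when the KVM_REQ_HV_TLB_FLUSH request is serviced and reports failure when !tdp_enabled or hv_vcpu is NULL so the caller falls back to a full guest TLB flush, and kvm_hv_flush_tlb() expands the hypercall's sparse bank list into hv_vcpu->sparse_banks; the nested.vm_id comparison at line 2122 restricts an L2-initiated flush to vCPUs that belong to the same nested VM.
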
2168 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_send_ipi() local
2169 u64 *sparse_banks = hv_vcpu->sparse_banks; in kvm_hv_send_ipi()
2248 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_cpuid() local
2253 if (!hv_vcpu) { in kvm_hv_set_cpuid()
2262 memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache)); in kvm_hv_set_cpuid()
2269 hv_vcpu->cpuid_cache.features_eax = entry->eax; in kvm_hv_set_cpuid()
2270 hv_vcpu->cpuid_cache.features_ebx = entry->ebx; in kvm_hv_set_cpuid()
2271 hv_vcpu->cpuid_cache.features_edx = entry->edx; in kvm_hv_set_cpuid()
2276 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; in kvm_hv_set_cpuid()
2277 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; in kvm_hv_set_cpuid()
2282 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; in kvm_hv_set_cpuid()
2286 hv_vcpu->cpuid_cache.nested_eax = entry->eax; in kvm_hv_set_cpuid()
2287 hv_vcpu->cpuid_cache.nested_ebx = entry->ebx; in kvm_hv_set_cpuid()
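
kvm_hv_set_cpuid() refills cpuid_cache whenever userspace updates the vCPU's CPUID table, one cached word per Hyper-V leaf: features_* from HYPERV_CPUID_FEATURES, enlightenments_* from HYPERV_CPUID_ENLIGHTMENT_INFO, syndbg_cap_eax from the synthetic-debugger capabilities leaf, and nested_* from HYPERV_CPUID_NESTED_FEATURES. A hedged sketch of one such lookup (kvm_find_cpuid_entry()'s signature has varied across kernel versions, so treat the exact call form as an assumption):

        entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
        if (entry) {
                hv_vcpu->cpuid_cache.features_eax = entry->eax;
                hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
                hv_vcpu->cpuid_cache.features_edx = entry->edx;
        }
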
2293 struct kvm_vcpu_hv *hv_vcpu; in kvm_hv_set_enforce_cpuid() local
2306 hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_enforce_cpuid()
2307 hv_vcpu->enforce_cpuid = enforce; in kvm_hv_set_enforce_cpuid()
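
kvm_hv_set_enforce_cpuid() is the backend of a per-vCPU capability: userspace opts into the enforcement mode consumed by hv_check_msr_access()/hv_check_hypercall_access() through KVM_ENABLE_CAP with KVM_CAP_HYPERV_ENFORCE_CPUID. A minimal userspace sketch; vcpu_fd is a hypothetical open vCPU file descriptor, and the snippet needs <sys/ioctl.h> plus <linux/kvm.h>:

        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_HYPERV_ENFORCE_CPUID,
                .args = { 1 },          /* 1 = enforce Hyper-V CPUID, 0 = legacy permissive mode */
        };

        if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
                perror("KVM_ENABLE_CAP(KVM_CAP_HYPERV_ENFORCE_CPUID)");
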
2418 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code) in hv_check_hypercall_access() argument
2420 if (!hv_vcpu->enforce_cpuid) in hv_check_hypercall_access()
2425 return hv_vcpu->cpuid_cache.enlightenments_ebx && in hv_check_hypercall_access()
2426 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; in hv_check_hypercall_access()
2428 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; in hv_check_hypercall_access()
2430 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; in hv_check_hypercall_access()
2438 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) || in hv_check_hypercall_access()
2439 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; in hv_check_hypercall_access()
2442 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2448 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2451 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2456 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2459 return hv_vcpu->cpuid_cache.features_ebx & in hv_check_hypercall_access()
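
hv_check_hypercall_access() applies the same enforcement to hypercall codes: post-message and signal-event calls require HV_POST_MESSAGES/HV_SIGNAL_EVENTS in features_ebx, the debug hypercalls are only gated on HV_DEBUGGING when the synthetic debugger is enabled (kvm_hv_is_syndbg_enabled()), and the enlightenments_ebx test at lines 2425-2426 only allows HVCALL_NOTIFY_LONG_SPIN_WAIT when the TLFS "recommended spinlock retry count" field holds a meaningful value (neither 0 nor 0xFFFFFFFF).
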
2470 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_hypercall() local
2509 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { in kvm_hv_hypercall()
2520 if (unlikely(hv_vcpu->enforce_cpuid && in kvm_hv_hypercall()
2521 !(hv_vcpu->cpuid_cache.features_edx & in kvm_hv_hypercall()