/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX	0x31237648
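/*
 * Illustrative sketch only, not KVM code: a guest confirms the Hyper-V
 * hypercall interface by checking the interface signature leaf, e.g.:
 *
 *	cpuid(HYPERV_CPUID_INTERFACE, &eax, &ebx, &ecx, &edx);
 *	if (eax == HYPERV_CPUID_SIGNATURE_EAX)
 *		... Hyper-V-compatible interface is present ...
 *
 * HYPERV_CPUID_INTERFACE (0x40000001) comes from hyperv-tlfs.h; the
 * cpuid() helper stands in for the guest's own CPUID wrapper.
 */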

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)
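/*
 * Note (an assumption based on the TLFS naming, not spelled out here): with
 * HV_X64_SYNDBG_OPTION_USE_HCALLS set, the debugger transport uses the
 * HVCALL_POST_DEBUG_DATA/HVCALL_RETRIEVE_DEBUG_DATA hypercalls rather than
 * the send/receive buffer MSRs above.
 */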

static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

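/*
 * May return NULL: the per-vCPU Hyper-V context is allocated lazily by
 * kvm_hv_vcpu_init(), so callers must be prepared for a NULL result.
 */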
static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

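/*
 * The VP index defaults to the vCPU's index and can be overridden by the
 * host via HV_X64_MSR_VP_INDEX; fall back to vcpu_idx when the Hyper-V
 * context was never allocated.
 */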
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

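/*
 * Hypercalls are unavailable until the guest identifies itself by writing
 * a non-zero guest OS ID to HV_X64_MSR_GUEST_OS_ID, per the TLFS.
 */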
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

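/*
 * 'stimer - stimer->index' backs up from stimer[index] to stimer[0],
 * which is exactly what container_of() needs to recover the enclosing
 * kvm_vcpu_hv.
 */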
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

/*
 * With the HV_ACCESS_TSC_INVARIANT feature, the invariant TSC bit
 * (CPUID.80000007H:EDX[8]) is only observed after
 * HV_X64_MSR_TSC_INVARIANT_CONTROL has been written to.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * If Hyper-V's invariant TSC control is not exposed to the guest,
	 * the invariant TSC CPUID flag is not suppressed; Windows guests were
	 * observed to handle it correctly. Going forward, VMMs are encouraged
	 * to enable Hyper-V's invariant TSC control when the invariant TSC
	 * CPUID flag is set, to make KVM's behavior match genuine Hyper-V.
	 */
	if (!hv_vcpu ||
	    !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
		return false;

	/*
	 * If Hyper-V's invariant TSC control is exposed to the guest, KVM is
	 * responsible for suppressing the invariant TSC CPUID flag if the
	 * Hyper-V control is not enabled.
	 */
	return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}
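
/*
 * Illustrative sketch only, not KVM code: a guest that is offered
 * HV_ACCESS_TSC_INVARIANT un-suppresses the invariant TSC bit by enabling
 * the control, e.g.:
 *
 *	wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC);
 *
 * Until that write, kvm_hv_invtsc_suppressed() returns true and KVM hides
 * CPUID.80000007H:EDX[8] from the guest.
 */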

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									   bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

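/*
 * Drop queued PV TLB flush entries once a full TLB flush has made them
 * redundant: clearing KVM_REQ_HV_TLB_FLUSH and draining the fifo here
 * avoids a second, now unnecessary, flush.
 */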
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

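/*
 * Per the TLFS calling convention, the hypercall input value (whose low
 * 16 bits are the call code) is passed in RCX for the 64-bit ABI and in
 * EDX:EAX for the 32-bit ABI, hence the RCX/RAX reads below.
 */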
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);

#endif