/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <xen/alternative-call.h>
#include <xen/mm.h>

#include <asm/asm_defns.h>
#include <asm/current.h>
#include <asm/hvm/asid.h>
#include <asm/msr-index.h>
#include <asm/x86_emulate.h>

struct pirq; /* needed by pi_update_irte */

#ifdef CONFIG_HVM_FEP
/* Permit use of the Forced Emulation Prefix in HVM guests */
extern bool opt_hvm_fep;
#else
#define opt_hvm_fep 0
#endif

/*
 * Results for hvm_guest_x86_mode().
 *
 * Note, some callers depend on the order of these constants.
 *
 * TODO: Rework hvm_guest_x86_mode() to avoid mixing the architectural
 * concepts of mode and operand size.
 */
#define X86_MODE_REAL  0
#define X86_MODE_VM86  1
#define X86_MODE_16BIT 2
#define X86_MODE_32BIT 4
#define X86_MODE_64BIT 8
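/*
 * Note the 16/32/64-bit constants are 2, 4 and 8 respectively, i.e. they
 * double as a size-in-bytes encoding, which appears to be the mixing of
 * mode and operand size the TODO above refers to.
 */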

/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi,
    hvm_intsrc_mce,
    hvm_intsrc_vector
};
struct hvm_intack {
    uint8_t source; /* enum hvm_intsrc */
    uint8_t vector;
};
#define hvm_intack(src, vec)   ((struct hvm_intack) { hvm_intsrc_##src, vec })
#define hvm_intack_none        hvm_intack(none, 0)
#define hvm_intack_pic(vec)    hvm_intack(pic, vec)
#define hvm_intack_lapic(vec)  hvm_intack(lapic, vec)
#define hvm_intack_nmi         hvm_intack(nmi, 2)
#define hvm_intack_mce         hvm_intack(mce, 18)
#define hvm_intack_vector(vec) hvm_intack(vector, vec)
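/*
 * For example, hvm_intack_lapic(0x31) expands to the compound literal
 * (struct hvm_intack) { hvm_intsrc_lapic, 0x31 }, while the NMI/MCE
 * variants hard-code their architectural vectors (2 and 18).
 */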
enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret,  /* NMI blocked until IRET */
    hvm_intblk_arch,      /* SVM/VMX specific reason */
};

/* These happen to be the same as the VMX interrupt shadow definitions. */
#define HVM_INTR_SHADOW_STI    0x00000001
#define HVM_INTR_SHADOW_MOV_SS 0x00000002
#define HVM_INTR_SHADOW_SMI    0x00000004
#define HVM_INTR_SHADOW_NMI    0x00000008

#define HVM_EVENT_VECTOR_UNSET    (-1)
#define HVM_EVENT_VECTOR_UPDATING (-2)

/* update_guest_cr() flags. */
#define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001

struct hvm_vcpu_nonreg_state {
    union {
        struct {
            uint64_t activity_state;
            uint64_t interruptibility_info;
            uint64_t pending_dbg;
            uint64_t interrupt_status;
        } vmx;
    };
};

/*
 * The hardware virtual machine (HVM) interface abstracts away the specifics
 * of the x86/x86_64 CPU virtualization assists. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    const char *name;

    struct {
        /* Indicate HAP capabilities. */
        bool hap:1,
             hap_superpage_1gb:1,
             hap_superpage_2mb:1,

             /* Altp2m capabilities */
             altp2m:1,
             singlestep:1,

             /* Hardware virtual interrupt delivery enable? */
             virtual_intr_delivery:1,

             /* Nested virt capabilities */
             nested_virt:1;
    } caps;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_creation_finished)(struct domain *d);
    void (*domain_relinquish_resources)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /* Examine specifics of the guest state. */
    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
    void (*get_nonreg_state)(struct vcpu *v,
                             struct hvm_vcpu_nonreg_state *nrs);
    void (*set_nonreg_state)(struct vcpu *v,
                             struct hvm_vcpu_nonreg_state *nrs);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned int (*get_cpl)(struct vcpu *v);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest CRn or EFER has changed.
     */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr,
                            unsigned int flags);
    void (*update_guest_efer)(struct vcpu *v);

    void (*cpuid_policy_changed)(struct vcpu *v);

    void (*fpu_leave)(struct vcpu *v);

    int  (*get_guest_pat)(struct vcpu *v, uint64_t *gpat);
    int  (*set_guest_pat)(struct vcpu *v, uint64_t gpat);

    void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);

    void (*inject_event)(const struct x86_event *event);

    void (*init_hypercall_page)(void *ptr);

    bool (*event_pending)(const struct vcpu *v);
    bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
    void (*invlpg)(struct vcpu *v, unsigned long linear);

    int  (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);

    /* Copy up to 15 bytes from cached instruction bytes at current rIP. */
    unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);

    /* Instruction intercepts: non-void return values are X86EMUL codes. */
    void (*wbinvd_intercept)(void);
    void (*fpu_dirty_intercept)(void);
    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
    void (*handle_cd)(struct vcpu *v, unsigned long value);
    void (*set_info_guest)(struct vcpu *v);
    void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
    void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);

    /* Nested HVM */
    int (*nhvm_vcpu_initialise)(struct vcpu *v);
    void (*nhvm_vcpu_destroy)(struct vcpu *v);
    int (*nhvm_vcpu_reset)(struct vcpu *v);
    int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event);
    uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
    bool (*nhvm_vmcx_guest_intercepts_event)(
        struct vcpu *v, unsigned int vector, int errcode);

    bool (*nhvm_vmcx_hap_enabled)(struct vcpu *v);

    enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
    void (*nhvm_domain_relinquish_resources)(struct domain *d);

    /* Virtual interrupt delivery */
    void (*update_eoi_exit_bitmap)(struct vcpu *v, uint8_t vector, bool set);
    void (*process_isr)(int isr, struct vcpu *v);
    void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
    void (*sync_pir_to_irr)(struct vcpu *v);
    bool (*test_pir)(const struct vcpu *v, uint8_t vector);
    void (*handle_eoi)(uint8_t vector, int isr);
    int (*pi_update_irte)(const struct vcpu *v, const struct pirq *pirq,
                          uint8_t gvec);
    void (*update_vlapic_mode)(struct vcpu *v);

    /* Walk nested p2m */
    int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                paddr_t *L1_gpa, unsigned int *page_order,
                                uint8_t *p2m_acc, struct npfec npfec);

    void (*enable_msr_interception)(struct domain *d, uint32_t msr);

    /* Alternate p2m */
    void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
    void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
    bool (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
    int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);

    /* vmtrace */
    int (*vmtrace_control)(struct vcpu *v, bool enable, bool reset);
    int (*vmtrace_output_position)(struct vcpu *v, uint64_t *pos);
    int (*vmtrace_set_option)(struct vcpu *v, uint64_t key, uint64_t value);
    int (*vmtrace_get_option)(struct vcpu *v, uint64_t key, uint64_t *value);
    int (*vmtrace_reset)(struct vcpu *v);

    uint64_t (*get_reg)(struct vcpu *v, unsigned int reg);
    void (*set_reg)(struct vcpu *v, unsigned int reg, uint64_t val);

    /*
     * Parameters and callbacks for hardware-assisted TSC scaling,
     * which are valid only when the hardware feature is available.
     */
    struct {
        /* number of bits of the fractional part of TSC scaling ratio */
        uint8_t  ratio_frac_bits;
        /* maximum-allowed TSC scaling ratio */
        uint64_t max_ratio;
    } tsc_scaling;
};

extern struct hvm_function_table hvm_funcs;
extern bool hvm_enabled;
extern int8_t hvm_port80_allowed;

extern const struct hvm_function_table *start_svm(void);
extern const struct hvm_function_table *start_vmx(void);

void svm_fill_funcs(void);
void vmx_fill_funcs(void);

int hvm_domain_initialise(struct domain *d,
                          const struct xen_domctl_createdomain *config);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_down(struct vcpu *v);
int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

void hvm_get_guest_pat(struct vcpu *v, uint64_t *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, uint64_t guest_pat);

uint64_t hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc);

u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);

void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc);

int vmsi_deliver(
    struct domain *d, int vector,
    uint8_t dest, uint8_t dest_mode,
    uint8_t delivery_mode, uint8_t trig_mode);
struct hvm_pirq_dpci;
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);

enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);

void hvm_init_hypercall_page(struct domain *d, void *ptr);

void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);
void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);

void hvm_set_info_guest(struct vcpu *v);

int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);

void hvm_inject_event(const struct x86_event *event);

int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);

void hvm_set_rdtsc_exiting(struct domain *d, bool enable);

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
    int32_t errcode, unsigned int insn_len, unsigned int extra_eflags);

enum hvm_access_type {
    hvm_access_insn_fetch,
    hvm_access_none,
    hvm_access_read,
    hvm_access_write
};

bool hvm_vcpu_virtual_to_linear(
    struct vcpu *v,
    enum x86_segment seg,
    const struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    const struct segment_register *active_cs,
    unsigned long *linear_addr);

static inline bool hvm_virtual_to_linear_addr(
    enum x86_segment seg,
    const struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    const struct segment_register *active_cs,
    unsigned long *linear)
{
    return hvm_vcpu_virtual_to_linear(current, seg, reg, offset, bytes,
                                      access_type, active_cs, linear);
}

void *hvm_map_guest_frame_rw(unsigned long gfn, bool permanent,
                             bool *writable);
void *hvm_map_guest_frame_ro(unsigned long gfn, bool permanent);
void hvm_unmap_guest_frame(void *p, bool permanent);
void hvm_mapped_guest_frames_mark_dirty(struct domain *d);

int hvm_debug_op(struct vcpu *v, int32_t op);

/* Caller should pause vcpu before calling this function */
void hvm_toggle_singlestep(struct vcpu *v);
void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);

int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                              struct npfec npfec);

/* Check CR4/EFER values */
const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                           signed int cr0_pg);
unsigned long hvm_cr4_guest_valid_bits(const struct domain *d);

int hvm_copy_context_and_params(struct domain *dst, struct domain *src);

int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);

static inline bool using_vmx(void)
{
    return IS_ENABLED(CONFIG_INTEL_VMX) && cpu_has_vmx;
}

static inline bool using_svm(void)
{
    return IS_ENABLED(CONFIG_AMD_SVM) && cpu_has_svm;
}

#ifdef CONFIG_HVM

#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)

#define hvm_tsc_scaling_supported \
    (!!hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_default_tsc_scaling_ratio \
    (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)
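/*
 * The scaling ratio is a binary fixed-point value with ratio_frac_bits
 * fractional bits, so the default above (1 << ratio_frac_bits) represents a
 * guest/host TSC frequency ratio of exactly 1.0 (for instance, 1ULL << 48
 * when the hardware uses 48 fractional bits).
 */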

#define hvm_tsc_scaling_ratio(d) \
    ((d)->arch.hvm.tsc_scaling_ratio)

#define hvm_get_guest_time(v) hvm_get_guest_time_fixed(v, 0)

#define hvm_paging_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP))
#define hvm_pcid_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE))
#define hvm_pae_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP))
#define hvm_smap_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
    ((v)->arch.hvm.guest_efer & EFER_NXE)
#define hvm_pku_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))
#define hvm_pks_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKS))

/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb hvm_funcs.caps.hap_superpage_1gb
#define hap_has_2mb hvm_funcs.caps.hap_superpage_2mb

#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))

static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return hvm_funcs.set_descriptor_access_exiting;
}

static inline void hvm_domain_creation_finished(struct domain *d)
{
    if ( hvm_funcs.domain_creation_finished )
        alternative_vcall(hvm_funcs.domain_creation_finished, d);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    ASSERT(v == current);
    return alternative_call(hvm_funcs.guest_x86_mode, v);
}

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    if ( hvm_funcs.update_host_cr3 )
        alternative_vcall(hvm_funcs.update_host_cr3, v);
}

static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0);
}

static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
{
    unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;

    alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags);
}

static inline void hvm_update_guest_efer(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.update_guest_efer, v);
}

static inline void hvm_cpuid_policy_changed(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
}

static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                      uint64_t at_tsc)
{
    alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
}

/*
 * Called to ensure that all guest-specific mappings in a tagged TLB are
 * flushed; does *not* flush Xen's TLB entries, and on processors without a
 * tagged TLB it will be a no-op.
 */
static inline void hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_asid_flush_core();
}

static inline unsigned int
hvm_get_cpl(struct vcpu *v)
{
    return alternative_call(hvm_funcs.get_cpl, v);
}

#define has_hvm_params(d) \
    ((d)->arch.hvm.params != NULL)

#define viridian_feature_mask(d) \
    (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)

#define is_viridian_domain(d) \
    (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))

#define is_viridian_vcpu(v) \
    is_viridian_domain((v)->domain)

#define has_viridian_time_ref_count(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))

#define has_viridian_apic_assist(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))

#define has_viridian_synic(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic))

static inline void hvm_inject_exception(
    unsigned int vector, unsigned int type,
    unsigned int insn_len, int error_code)
{
    struct x86_event event = {
        .vector = vector,
        .type = type,
        .insn_len = insn_len,
        .error_code = error_code,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    struct x86_event event = {
        .vector = vector,
        .type = X86_ET_HW_EXC,
        .error_code = errcode,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
{
    struct x86_event event = {
        .vector = X86_EXC_PF,
        .type = X86_ET_HW_EXC,
        .error_code = errcode,
    };

    event.cr2 = cr2;

    hvm_inject_event(&event);
}

static inline bool hvm_event_pending(const struct vcpu *v)
{
    return alternative_call(hvm_funcs.event_pending, v);
}

static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
{
    alternative_vcall(hvm_funcs.invlpg, v, linear);
}

/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These exceptions must always be intercepted. */
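/*
 * In particular, #MC must reach Xen so the host can react to machine checks,
 * while #DB and #AC are intercepted so a guest cannot wedge a CPU by
 * arranging for these otherwise-benign exceptions to be raised recursively.
 */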
#define HVM_TRAP_MASK ((1U << X86_EXC_DB) | \
                       (1U << X86_EXC_AC) | \
                       (1U << X86_EXC_MC))

/* Called in boot/resume paths.  Must cope with no HVM support. */
static inline int hvm_cpu_up(void)
{
    if ( hvm_funcs.cpu_up )
        return alternative_call(hvm_funcs.cpu_up);

    return 0;
}

/* Called in shutdown paths.  Must cope with no HVM support. */
static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        alternative_vcall(hvm_funcs.cpu_down);
}

static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
    return (hvm_funcs.get_insn_bytes
            ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
}

static inline void hvm_sanitize_regs_fields(struct cpu_user_regs *regs,
                                            bool compat)
{
    if ( compat )
    {
        /* Clear GPR upper halves, to counteract guests playing games. */
        regs->rbp = (uint32_t)regs->rbp;
        regs->rbx = (uint32_t)regs->rbx;
        regs->rax = (uint32_t)regs->rax;
        regs->rcx = (uint32_t)regs->rcx;
        regs->rdx = (uint32_t)regs->rdx;
        regs->rsi = (uint32_t)regs->rsi;
        regs->rdi = (uint32_t)regs->rdi;
        regs->rip = (uint32_t)regs->rip;
        regs->rflags = (uint32_t)regs->rflags;
        regs->rsp = (uint32_t)regs->rsp;
    }

#ifndef NDEBUG
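    /*
     * Poison fields which aren't refreshed on VM exit for HVM guests, so any
     * accidental use of stale values stands out in debug builds.
     */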
    regs->error_code = 0xbeef;
    regs->entry_vector = 0xbeef;
    regs->saved_upcall_mask = 0xbf;
    regs->cs = 0xbeef;
    regs->ss = 0xbeef;
#endif
}

/*
 * Nested HVM
 */

/*
 * Inject a vmexit into the L1 guest: the L1 guest will see a VMEXIT for the
 * given exception event.
 */
static inline int nhvm_vcpu_vmexit_event(
    struct vcpu *v, const struct x86_event *event)
{
    return alternative_call(hvm_funcs.nhvm_vcpu_vmexit_event, v, event);
}

/*
 * Returns the L1 guest's CR3 that points to the page table used to
 * translate L2 guest physical addresses to L1 guest physical addresses.
 */
static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
    return alternative_call(hvm_funcs.nhvm_vcpu_p2m_base, v);
}

/* Returns true when the L1 guest intercepts the specified event. */
static inline bool nhvm_vmcx_guest_intercepts_event(
    struct vcpu *v, unsigned int vector, int errcode)
{
    return alternative_call(hvm_funcs.nhvm_vmcx_guest_intercepts_event, v,
                            vector, errcode);
}

/* returns true when l1 guest wants to use hap to run l2 guest */
static inline bool nhvm_vmcx_hap_enabled(struct vcpu *v)
{
    return alternative_call(hvm_funcs.nhvm_vmcx_hap_enabled, v);
}

/* interrupt */
static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
{
    return alternative_call(hvm_funcs.nhvm_intr_blocked, v);
}

static inline int nhvm_hap_walk_L1_p2m(
    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
    uint8_t *p2m_acc, struct npfec npfec)
{
    return alternative_call(hvm_funcs.nhvm_hap_walk_L1_p2m,
        v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
}

static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
}

static inline bool hvm_is_singlestep_supported(void)
{
    return hvm_funcs.caps.singlestep;
}

static inline bool hvm_hap_supported(void)
{
    return hvm_funcs.caps.hap;
}

/* returns true if hardware supports alternate p2m's */
static inline bool hvm_altp2m_supported(void)
{
    return IS_ENABLED(CONFIG_ALTP2M) && hvm_funcs.caps.altp2m;
}

/* Returns true if we have the minimum hardware requirements for nested virt */
static inline bool hvm_nested_virt_supported(void)
{
    return hvm_funcs.caps.nested_virt;
}

/* updates the current hardware p2m */
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_p2m )
        alternative_vcall(hvm_funcs.altp2m_vcpu_update_p2m, v);
}

/* updates VMCS fields related to VMFUNC and #VE */
static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
        alternative_vcall(hvm_funcs.altp2m_vcpu_update_vmfunc_ve, v);
}

/* emulates #VE */
static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
    {
        alternative_vcall(hvm_funcs.altp2m_vcpu_emulate_ve, v);
        return true;
    }
    return false;
}

static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
    if ( hvm_funcs.vmtrace_control )
        return alternative_call(hvm_funcs.vmtrace_control, v, enable, reset);

    return -EOPNOTSUPP;
}

/* Returns -errno, or a boolean of whether tracing is currently active. */
static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
{
    if ( hvm_funcs.vmtrace_output_position )
        return alternative_call(hvm_funcs.vmtrace_output_position, v, pos);

    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_set_option(
    struct vcpu *v, uint64_t key, uint64_t value)
{
    if ( hvm_funcs.vmtrace_set_option )
        return alternative_call(hvm_funcs.vmtrace_set_option, v, key, value);

    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_get_option(
    struct vcpu *v, uint64_t key, uint64_t *value)
{
    if ( hvm_funcs.vmtrace_get_option )
        return alternative_call(hvm_funcs.vmtrace_get_option, v, key, value);

    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_reset(struct vcpu *v)
{
    if ( hvm_funcs.vmtrace_reset )
        return alternative_call(hvm_funcs.vmtrace_reset, v);

    return -EOPNOTSUPP;
}

/*
 * Accessors for registers which have per-guest-type or per-vendor locations
 * (e.g. VMCS, msr load/save lists, VMCB, VMLOAD lazy, etc).
 *
 * The caller is responsible for all auditing - these accessors do not fail,
 * but do use domain_crash() for usage errors.
 *
 * Must cope with being called in non-current context.
 */
uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg);
void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val);

/*
 * This must be defined as a macro instead of an inline function,
 * because it uses 'struct vcpu' and 'struct domain' which have
 * not been defined yet.
 */
#define arch_vcpu_block(v) ({                                   \
    struct vcpu *v_ = (v);                                      \
    struct domain *d_ = v_->domain;                             \
    if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block )  \
        d_->arch.hvm.pi_ops.vcpu_block(v_);                     \
})

static inline void hvm_get_nonreg_state(struct vcpu *v,
                                        struct hvm_vcpu_nonreg_state *nrs)
{
    if ( hvm_funcs.get_nonreg_state )
        alternative_vcall(hvm_funcs.get_nonreg_state, v, nrs);
}

static inline void hvm_set_nonreg_state(struct vcpu *v,
                                        struct hvm_vcpu_nonreg_state *nrs)
{
    if ( hvm_funcs.set_nonreg_state )
        alternative_vcall(hvm_funcs.set_nonreg_state, v, nrs);
}

static inline int hvm_pi_update_irte(const struct vcpu *v,
                                     const struct pirq *pirq, uint8_t gvec)
{
    return alternative_call(hvm_funcs.pi_update_irte, v, pirq, gvec);
}

static inline void hvm_update_vlapic_mode(struct vcpu *v)
{
    if ( hvm_funcs.update_vlapic_mode )
        alternative_vcall(hvm_funcs.update_vlapic_mode, v);
}

static inline void hvm_sync_pir_to_irr(struct vcpu *v)
{
    if ( hvm_funcs.sync_pir_to_irr )
        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
}

#else  /* CONFIG_HVM */

#define hvm_enabled false

/*
 * List of inline functions above, of which only declarations are
 * needed because DCE will kick in.
 */
int hvm_guest_x86_mode(struct vcpu *v);
void hvm_cpuid_policy_changed(struct vcpu *v);
void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);

/* End of prototype list */

/* Called by code in other headers */
static inline bool hvm_is_singlestep_supported(void)
{
    return false;
}

static inline bool hvm_hap_supported(void)
{
    return false;
}

static inline bool hvm_altp2m_supported(void)
{
    return false;
}

static inline bool hvm_nested_virt_supported(void)
{
    return false;
}

static inline bool nhvm_vmcx_hap_enabled(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
    return false;
}


/* Called by common code */
static inline int hvm_cpu_up(void)
{
    return 0;
}

static inline void hvm_cpu_down(void) {}

static inline void hvm_flush_guest_tlbs(void) {}

static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear)
{
    ASSERT_UNREACHABLE();
}

static inline void hvm_domain_creation_finished(struct domain *d)
{
    ASSERT_UNREACHABLE();
}

/*
 * Shadow code needs further cleanup to eliminate some HVM-only paths. For
 * now provide the stubs here but assert they will never be reached.
 */
static inline void hvm_update_host_cr3(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
}

static inline void hvm_update_guest_cr3(const struct vcpu *v, bool noflush)
{
    ASSERT_UNREACHABLE();
}

static inline unsigned int hvm_get_cpl(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
    return -1;
}

static inline bool hvm_event_pending(const struct vcpu *v)
{
    return false;
}

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    ASSERT_UNREACHABLE();
}

static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return false;
}

static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
{
    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_set_option(
    struct vcpu *v, uint64_t key, uint64_t value)
{
    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_get_option(
    struct vcpu *v, uint64_t key, uint64_t *value)
{
    return -EOPNOTSUPP;
}

static inline uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg)
{
    ASSERT_UNREACHABLE();
    return 0;
}
static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
{
    ASSERT_UNREACHABLE();
}

#define is_viridian_domain(d) ((void)(d), false)
#define is_viridian_vcpu(v) ((void)(v), false)
#define has_viridian_time_ref_count(d) ((void)(d), false)
#define hvm_long_mode_active(v) ((void)(v), false)
#define hvm_get_guest_time(v) ((void)(v), 0)

#define hvm_tsc_scaling_supported false
#define hap_has_1gb false
#define hap_has_2mb false

#define hvm_paging_enabled(v) ((void)(v), false)
#define hvm_wp_enabled(v) ((void)(v), false)
#define hvm_pcid_enabled(v) ((void)(v), false)
#define hvm_pae_enabled(v) ((void)(v), false)
#define hvm_smep_enabled(v) ((void)(v), false)
#define hvm_smap_enabled(v) ((void)(v), false)
#define hvm_nx_enabled(v) ((void)(v), false)
#define hvm_pku_enabled(v) ((void)(v), false)
#define hvm_pks_enabled(v) ((void)(v), false)

#define arch_vcpu_block(v) ((void)(v))

#endif  /* CONFIG_HVM */

#endif /* __ASM_X86_HVM_HVM_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */