/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/asid.h>
#include <public/domctl.h>
#include <public/hvm/save.h>
#include <xen/mm.h>

#ifdef CONFIG_HVM_FEP
/* Permit use of the Forced Emulation Prefix in HVM guests */
extern bool_t opt_hvm_fep;
#else
#define opt_hvm_fep 0
#endif

/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi,
    hvm_intsrc_mce,
    hvm_intsrc_vector
};
struct hvm_intack {
    uint8_t source; /* enum hvm_intsrc */
    uint8_t vector;
};
#define hvm_intack(src, vec)   ((struct hvm_intack) { hvm_intsrc_##src, vec })
#define hvm_intack_none        hvm_intack(none, 0)
#define hvm_intack_pic(vec)    hvm_intack(pic, vec)
#define hvm_intack_lapic(vec)  hvm_intack(lapic, vec)
#define hvm_intack_nmi         hvm_intack(nmi, 2)
#define hvm_intack_mce         hvm_intack(mce, 18)
#define hvm_intack_vector(vec) hvm_intack(vector, vec)
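/*
 * Illustrative sketch only (not part of the interface): an interrupt source
 * that has settled on a vector builds an intack with one of the constructors
 * above and then checks deliverability, e.g.
 *
 *     struct hvm_intack intack = hvm_intack_lapic(0x30);
 *     if ( hvm_interrupt_blocked(v, intack) == hvm_intblk_none )
 *         ... deliver the interrupt ...
 *
 * The fixed vectors used by hvm_intack_nmi (2) and hvm_intack_mce (18) are
 * the architectural NMI and #MC exception vectors.
 */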
enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret,  /* NMI blocked until IRET */
    hvm_intblk_arch,      /* SVM/VMX specific reason */
};

/* These happen to be the same as the VMX interrupt shadow definitions. */
#define HVM_INTR_SHADOW_STI    0x00000001
#define HVM_INTR_SHADOW_MOV_SS 0x00000002
#define HVM_INTR_SHADOW_SMI    0x00000004
#define HVM_INTR_SHADOW_NMI    0x00000008

/*
 * HAP superpage capabilities:
 * bit 0: 2MB superpages allowed
 * bit 1: 1GB superpages allowed
 */
#define HVM_HAP_SUPERPAGE_2MB   0x00000001
#define HVM_HAP_SUPERPAGE_1GB   0x00000002

#define HVM_EVENT_VECTOR_UNSET    (-1)
#define HVM_EVENT_VECTOR_UPDATING (-2)

/*
 * The hardware virtual machine (HVM) interface abstracts away the specifics
 * of the x86/x86_64 CPU virtualization assists. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /* Support Hardware-Assisted Paging? */
    bool_t hap_supported;

    /* Necessary hardware support for alternate p2m's? */
    bool altp2m_supported;

    /* Indicate HAP capabilities. */
    unsigned int hap_capabilities;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    unsigned int (*init_msr)(void);
    void (*save_msr)(struct vcpu *, struct hvm_msr *);
    int (*load_msr)(struct vcpu *, struct hvm_msr *);

    /* Examine specifics of the guest state. */
    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned int (*get_cpl)(struct vcpu *v);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    unsigned long (*get_shadow_gs_base)(struct vcpu *v);

    /*
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest CRn or EFER has changed.
     */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
    void (*update_guest_efer)(struct vcpu *v);

    void (*update_guest_vendor)(struct vcpu *v);

    void (*fpu_leave)(struct vcpu *v);

    int  (*get_guest_pat)(struct vcpu *v, u64 *);
    int  (*set_guest_pat)(struct vcpu *v, u64);

    bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
    bool (*set_guest_bndcfgs)(struct vcpu *v, u64);

    void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);

    void (*inject_event)(const struct x86_event *event);

    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);

    int  (*event_pending)(struct vcpu *v);
    bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
    void (*invlpg)(struct vcpu *v, unsigned long vaddr);

    int  (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);

    /* Copy up to 15 bytes from cached instruction bytes at current rIP. */
    unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);

    /* Instruction intercepts: non-void return values are X86EMUL codes. */
    void (*wbinvd_intercept)(void);
    void (*fpu_dirty_intercept)(void);
    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
    int (*vmfunc_intercept)(struct cpu_user_regs *regs);
    void (*handle_cd)(struct vcpu *v, unsigned long value);
    void (*set_info_guest)(struct vcpu *v);
    void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
    void (*set_descriptor_access_exiting)(struct vcpu *v, bool);

    /* Nested HVM */
    int (*nhvm_vcpu_initialise)(struct vcpu *v);
    void (*nhvm_vcpu_destroy)(struct vcpu *v);
    int (*nhvm_vcpu_reset)(struct vcpu *v);
    int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event);
    uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
    bool_t (*nhvm_vmcx_guest_intercepts_event)(
        struct vcpu *v, unsigned int vector, int errcode);

    bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);

    enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
    void (*nhvm_domain_relinquish_resources)(struct domain *d);

    /* Virtual interrupt delivery */
    void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
    int (*virtual_intr_delivery_enabled)(void);
    void (*process_isr)(int isr, struct vcpu *v);
    void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
    void (*sync_pir_to_irr)(struct vcpu *v);
    bool (*test_pir)(const struct vcpu *v, uint8_t vector);
    void (*handle_eoi)(u8 vector);

    /* Walk nested p2m. */
    int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                paddr_t *L1_gpa, unsigned int *page_order,
                                uint8_t *p2m_acc, bool_t access_r,
                                bool_t access_w, bool_t access_x);

    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
    bool_t (*is_singlestep_supported)(void);
    int (*set_mode)(struct vcpu *v, int mode);

    /* Alternate p2m */
    void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
    void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
    bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
    int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);

    /*
     * Parameters and callbacks for hardware-assisted TSC scaling,
     * which are valid only when the hardware feature is available.
     */
    struct {
        /* number of bits of the fractional part of TSC scaling ratio */
        uint8_t  ratio_frac_bits;
        /* maximum-allowed TSC scaling ratio */
        uint64_t max_ratio;

        /* Architecture function to setup TSC scaling ratio */
        void (*setup)(struct vcpu *v);
    } tsc_scaling;
};
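/*
 * Illustrative sketch only: each vendor module provides one instance of this
 * table and returns it from its start_*() function declared below, roughly
 * along these lines (hook names abbreviated, not a complete initialiser):
 *
 *     static struct hvm_function_table __initdata vmx_function_table = {
 *         .name            = "VMX",
 *         .hap_supported   = 1,
 *         .guest_x86_mode  = vmx_guest_x86_mode,
 *         ...
 *     };
 *
 * Hooks that the hvm_* wrappers further down guard with a NULL check are
 * optional; the remaining hooks must be provided.
 */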

extern struct hvm_function_table hvm_funcs;
extern bool_t hvm_enabled;
extern bool_t cpu_has_lmsl;
extern s8 hvm_port80_allowed;

extern const struct hvm_function_table *start_svm(void);
extern const struct hvm_function_table *start_vmx(void);

int hvm_domain_initialise(struct domain *d, unsigned long domcr_flags,
                          struct xen_arch_domainconfig *config);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);
void hvm_domain_soft_reset(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_down(struct vcpu *v);
int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);

u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);
#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)

#define hvm_tsc_scaling_supported \
    (!!hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_default_tsc_scaling_ratio \
    (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_tsc_scaling_ratio(d) \
    ((d)->arch.hvm_domain.tsc_scaling_ratio)

u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
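/*
 * Illustrative note: the scaling ratio is a fixed-point value with
 * tsc_scaling.ratio_frac_bits fractional bits, so conceptually
 *
 *     scaled_tsc = (host_tsc * ratio) >> ratio_frac_bits
 *
 * and hvm_default_tsc_scaling_ratio (1 << ratio_frac_bits) is a 1:1 ratio.
 * The exact field widths are vendor-specific and reported by the hardware.
 */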

int hvm_set_mode(struct vcpu *v, int mode);
void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc);
#define hvm_get_guest_time(v) hvm_get_guest_time_fixed(v, 0)

int vmsi_deliver(
    struct domain *d, int vector,
    uint8_t dest, uint8_t dest_mode,
    uint8_t delivery_mode, uint8_t trig_mode);
struct hvm_pirq_dpci;
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);

#define hvm_paging_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
#define hvm_pcid_enabled(v) \
    (!!((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PCIDE))
#define hvm_pae_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
#define hvm_smap_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
/* HVM guests on Intel hardware leak Xen's NX settings into guest context. */
#define hvm_nx_enabled(v) \
    ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && cpu_has_nx) ||    \
     ((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
#define hvm_pku_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE))

/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))

#define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))

enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    ASSERT(v == current);
    return hvm_funcs.guest_x86_mode(v);
}

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    if ( hvm_funcs.update_host_cr3 )
        hvm_funcs.update_host_cr3(v);
}

static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    hvm_funcs.update_guest_cr(v, cr);
}

static inline void hvm_update_guest_efer(struct vcpu *v)
{
    hvm_funcs.update_guest_efer(v);
}

static inline void hvm_update_guest_vendor(struct vcpu *v)
{
    hvm_funcs.update_guest_vendor(v);
}

/*
 * Called to ensure that all guest-specific mappings in a tagged TLB are
 * flushed; does *not* flush Xen's TLB entries, and on processors without a
 * tagged TLB it is a no-op.
 */
static inline void hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_asid_flush_core();
}

void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page);

static inline unsigned int
hvm_get_cpl(struct vcpu *v)
{
    return hvm_funcs.get_cpl(v);
}

void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);
void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);

static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
{
    return hvm_funcs.get_shadow_gs_base(v);
}

static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
{
    return hvm_funcs.get_guest_bndcfgs &&
           hvm_funcs.get_guest_bndcfgs(v, val);
}

bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);

#define has_hvm_params(d) \
    ((d)->arch.hvm_domain.params != NULL)

#define viridian_feature_mask(d) \
    (has_hvm_params(d) ? (d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN] : 0)

#define is_viridian_domain(d) \
    (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))

#define has_viridian_time_ref_count(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))

#define has_viridian_apic_assist(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))

bool hvm_check_cpuid_faulting(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);

void hvm_inject_event(const struct x86_event *event);

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
{
    struct x86_event event = {
        .vector = TRAP_page_fault,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
        .cr2 = cr2,
    };

    hvm_inject_event(&event);
}
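/*
 * Illustrative sketch only: an emulation path that has detected a
 * translation fault would typically report it to the guest with
 * something like
 *
 *     hvm_inject_page_fault(PFEC_write_access | PFEC_user_mode, gla);
 *
 * where the PFEC_* error-code bits and the faulting linear address (gla)
 * describe the failed access.
 */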

static inline int hvm_event_pending(struct vcpu *v)
{
    return hvm_funcs.event_pending(v);
}

/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_debug)           | \
                       (1U << TRAP_alignment_check) | \
                       (1U << TRAP_machine_check))

int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);

void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);

static inline int hvm_cpu_up(void)
{
    return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
}

static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        hvm_funcs.cpu_down();
}

static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
    return (hvm_funcs.get_insn_bytes ? hvm_funcs.get_insn_bytes(v, buf) : 0);
}

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
    int32_t errcode);

enum hvm_access_type {
    hvm_access_insn_fetch,
    hvm_access_none,
    hvm_access_read,
    hvm_access_write
};
bool_t hvm_virtual_to_linear_addr(
    enum x86_segment seg,
    const struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    const struct segment_register *active_cs,
    unsigned long *linear_addr);
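/*
 * Illustrative sketch only: a caller wanting the linear address of a
 * 4-byte read through %ds might do roughly
 *
 *     struct segment_register ds, cs;
 *     unsigned long addr;
 *
 *     hvm_get_segment_register(v, x86_seg_ds, &ds);
 *     hvm_get_segment_register(v, x86_seg_cs, &cs);
 *     if ( hvm_virtual_to_linear_addr(x86_seg_ds, &ds, offset, 4,
 *                                     hvm_access_read, &cs, &addr) )
 *         ... addr holds the linear address ...
 *
 * A zero return means the access would fault (e.g. segment limit or
 * canonical-address check).
 */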

void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
                             bool_t *writable);
void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
void hvm_unmap_guest_frame(void *p, bool_t permanent);
void hvm_mapped_guest_frames_mark_dirty(struct domain *);

static inline void hvm_set_info_guest(struct vcpu *v)
{
    if ( hvm_funcs.set_info_guest )
        return hvm_funcs.set_info_guest(v);
}

int hvm_debug_op(struct vcpu *v, int32_t op);

/* Caller should pause vcpu before calling this function */
void hvm_toggle_singlestep(struct vcpu *v);

static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
{
#ifndef NDEBUG
    regs->error_code = 0xbeef;
    regs->entry_vector = 0xbeef;
    regs->saved_upcall_mask = 0xbf;
    regs->cs = 0xbeef;
    regs->ss = 0xbeef;
    regs->ds = 0xbeef;
    regs->es = 0xbeef;
    regs->fs = 0xbeef;
    regs->gs = 0xbeef;
#endif
}

int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                              struct npfec npfec);

#define hvm_msr_tsc_aux(v) ({                                               \
    struct domain *__d = (v)->domain;                                       \
    (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP)                               \
        ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm_vcpu.msr_tsc_aux; \
})

int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);

/*
 * Nested HVM
 */

/*
 * Inject a vmexit into the l1 guest: the l1 guest will see a VMEXIT caused
 * by the given exception event.
 */
static inline int nhvm_vcpu_vmexit_event(
    struct vcpu *v, const struct x86_event *event)
{
    return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
}

/*
 * Returns the l1 guest's cr3, which points to the page table used to
 * translate l2 guest physical addresses to l1 guest physical addresses.
 */
static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
    return hvm_funcs.nhvm_vcpu_p2m_base(v);
}

/* returns true when the l1 guest intercepts the specified trap */
static inline bool_t nhvm_vmcx_guest_intercepts_event(
    struct vcpu *v, unsigned int vector, int errcode)
{
    return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
}

/* returns true when l1 guest wants to use hap to run l2 guest */
static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
{
    return hvm_funcs.nhvm_vmcx_hap_enabled(v);
}

/* interrupt */
static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
{
    return hvm_funcs.nhvm_intr_blocked(v);
}

static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
    if ( hvm_funcs.enable_msr_interception )
    {
        hvm_funcs.enable_msr_interception(d, msr);
        return 1;
    }

    return 0;
}

static inline bool_t hvm_is_singlestep_supported(void)
{
    return (hvm_funcs.is_singlestep_supported &&
            hvm_funcs.is_singlestep_supported());
}

/* returns true if hardware supports alternate p2m's */
static inline bool hvm_altp2m_supported(void)
{
    return hvm_funcs.altp2m_supported;
}

/* updates the current hardware p2m */
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_p2m )
        hvm_funcs.altp2m_vcpu_update_p2m(v);
}

/* updates VMCS fields related to VMFUNC and #VE */
static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
}

/* emulates #VE */
static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
    {
        hvm_funcs.altp2m_vcpu_emulate_ve(v);
        return true;
    }
    return false;
}

/* Check CR4/EFER values */
const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                           signed int cr0_pg);
unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);

/*
 * This must be defined as a macro instead of an inline function,
 * because it uses 'struct vcpu' and 'struct domain' which have
 * not been defined yet.
 */
#define arch_vcpu_block(v) ({                                   \
    struct vcpu *v_ = (v);                                      \
    struct domain *d_ = v_->domain;                             \
    if ( is_hvm_domain(d_) &&                                   \
         (d_->arch.hvm_domain.pi_ops.vcpu_block) )              \
        d_->arch.hvm_domain.pi_ops.vcpu_block(v_);              \
})

#endif /* __ASM_X86_HVM_HVM_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */