1  /*
2   * vcpu.h: HVM per vcpu definitions
3   *
4   * Copyright (c) 2005, International Business Machines Corporation.
5   *
6   * This program is free software; you can redistribute it and/or modify it
7   * under the terms and conditions of the GNU General Public License,
8   * version 2, as published by the Free Software Foundation.
9   *
10   * This program is distributed in the hope it will be useful, but WITHOUT
11   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   * more details.
14   *
15   * You should have received a copy of the GNU General Public License along with
16   * this program; If not, see <http://www.gnu.org/licenses/>.
17   */
18  
19  #ifndef __ASM_X86_HVM_VCPU_H__
20  #define __ASM_X86_HVM_VCPU_H__
21  
22  #include <xen/tasklet.h>
23  #include <asm/hvm/io.h>
24  #include <asm/hvm/vlapic.h>
25  #include <asm/hvm/viridian.h>
26  #include <asm/hvm/vmx/vmcs.h>
27  #include <asm/hvm/vmx/vvmx.h>
28  #include <asm/hvm/svm/vmcb.h>
29  #include <asm/hvm/svm/nestedsvm.h>
30  #include <asm/mtrr.h>
31  
/*
 * What, if anything, still needs doing to finish an in-flight emulated
 * I/O once the device model has responded (stored in
 * hvm_vcpu_io.io_completion alongside the pending io_req).
 */
enum hvm_io_completion {
    HVMIO_no_completion,       /* nothing further required */
    HVMIO_mmio_completion,     /* complete an MMIO access */
    HVMIO_pio_completion,      /* complete a port I/O access */
    HVMIO_realmode_completion  /* complete a real-mode access
                                * (presumably VMX real-mode emulation --
                                * confirm against users) */
};
38  
/*
 * Per-vCPU ASID/VPID state.  The generation tag lets a stale ASID be
 * detected and reallocated (NOTE(review): presumably compared against a
 * per-pCPU generation counter on context switch -- confirm in asid.c).
 */
struct hvm_vcpu_asid {
    uint64_t generation; /* generation in which @asid was allocated */
    uint32_t asid;       /* hardware ASID (SVM) / VPID (VMX) value */
};
43  
/*
 * We may read or write up to m256 as a number of device-model
 * transactions.
 */
struct hvm_mmio_cache {
    unsigned long gla;  /* guest linear address of the cached access */
    unsigned int size;  /* number of valid bytes in buffer[] */
    uint8_t dir;        /* access direction (read/write) */
    uint8_t pad[3]; /* make buffer[] long-aligned */
    uint8_t buffer[32]; /* data bytes; 32 == sizeof an m256 access */
};
55  
/*
 * Per-vCPU state for emulated I/O: the request currently in flight to
 * the device model, plus caches used to re-enter/retry emulation
 * without repeating work already done.
 */
struct hvm_vcpu_io {
    /* I/O request in flight to device model. */
    enum hvm_io_completion io_completion;
    ioreq_t                io_req;

    /*
     * HVM emulation:
     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
     *  The latter is known to be an MMIO frame (not RAM).
     *  This translation is only valid for accesses as per @mmio_access.
     */
    struct npfec        mmio_access;
    unsigned long       mmio_gla;
    unsigned long       mmio_gpfn;

    /*
     * We may need to handle up to 3 distinct memory accesses per
     * instruction.
     */
    struct hvm_mmio_cache mmio_cache[3];
    unsigned int mmio_cache_count; /* number of mmio_cache[] slots in use */

    /* For retries we shouldn't re-fetch the instruction. */
    unsigned int mmio_insn_bytes;  /* valid length of mmio_insn[] */
    unsigned char mmio_insn[16];   /* cached instruction bytes */
    /*
     * For string instruction emulation we need to be able to signal a
     * necessary retry through other than function return codes.
     */
    bool_t mmio_retry;

    /*
     * MSI-X related addresses tracked during emulation.
     * NOTE(review): semantics inferred from names -- the unmask address
     * and snooped table-write address/GPA appear to support MSI-X
     * pass-through handling; confirm against msix code.
     */
    unsigned long msix_unmask_address;
    unsigned long msix_snoop_address;
    unsigned long msix_snoop_gpa;

    /* Guest-to-machine I/O port mapping for the current access. */
    const struct g2m_ioport *g2m_ioport;
};
93  
hvm_vcpu_io_need_completion(const struct hvm_vcpu_io * vio)94  static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
95  {
96      return (vio->io_req.state == STATE_IOREQ_READY) &&
97             !vio->io_req.data_is_ptr;
98  }
99  
/*
 * Per-vCPU nested-virtualization state, shared between the SVM and VMX
 * implementations (vendor specifics live in the @u union).
 * Terminology: "l1" is the guest hypervisor, "l2" its nested guest.
 */
struct nestedvcpu {
    bool_t nv_guestmode; /* vcpu in guestmode? */
    void *nv_vvmcx; /* l1 guest virtual VMCB/VMCS */
    void *nv_n1vmcx; /* VMCB/VMCS used to run l1 guest */
    void *nv_n2vmcx; /* shadow VMCB/VMCS used to run l2 guest */

    uint64_t nv_vvmcxaddr; /* l1 guest physical address of nv_vvmcx */
    paddr_t nv_n1vmcx_pa; /* host physical address of nv_n1vmcx */
    paddr_t nv_n2vmcx_pa; /* host physical address of nv_n2vmcx */

    /* SVM/VMX arch specific */
    union {
        struct nestedsvm nsvm;
        struct nestedvmx nvmx;
    } u;

    bool_t nv_flushp2m; /* True, when p2m table must be flushed */
    struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
    bool stale_np2m; /* True when p2m_base in VMCx02 is no longer valid */
    uint64_t np2m_generation; /* generation of nv_p2m when last assigned;
                               * used together with stale_np2m */

    struct hvm_vcpu_asid nv_n2asid; /* ASID used while running the l2 guest */

    bool_t nv_vmentry_pending; /* emulated vmentry requested */
    bool_t nv_vmexit_pending;  /* emulated vmexit requested */
    bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */

    /* Does l1 guest intercept io ports 0x80 and/or 0xED ?
     * Useful to optimize io permission handling.
     */
    bool_t nv_ioport80;
    bool_t nv_ioportED;

    /* L2's control-registers, just as the L2 sees them. */
    unsigned long       guest_cr[5];
};
136  
/* Nested-virtualization state of vcpu @v. */
#define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
138  
/* Per-vCPU alternate-p2m (altp2m) state. */
struct altp2mvcpu {
    uint16_t    p2midx;         /* alternate p2m index */
    gfn_t       veinfo_gfn;     /* #VE information page gfn */
};
143  
/* Alternate-p2m state of vcpu @v. */
#define vcpu_altp2m(v) ((v)->arch.hvm_vcpu.avcpu)
145  
/* Per-vCPU HVM state, embedded in the arch-specific vcpu structure. */
struct hvm_vcpu {
    /* Guest control-register and EFER values, just as the guest sees them. */
    unsigned long       guest_cr[5];
    unsigned long       guest_efer;

    /*
     * Processor-visible control-register values, while guest executes.
     *  CR0, CR4: Used as a cache of VMCS contents by VMX only.
     *  CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2).
     *  CR3:      Always used and kept up to date by paging subsystem.
     */
    unsigned long       hw_cr[5];

    struct vlapic       vlapic;           /* virtual local APIC */
    s64                 cache_tsc_offset; /* cached guest TSC offset
                                           * (NOTE(review): presumably what
                                           * is programmed into the
                                           * VMCS/VMCB -- confirm) */
    u64                 guest_time;       /* guest-visible time reference */

    /* Lock and list for virtual platform timers. */
    spinlock_t          tm_lock;
    struct list_head    tm_list;

    /* Debug-register handling state; names suggest: DRs dirtied by the
     * guest, latched debug state, and single-step mode -- confirm against
     * the DR handling code. */
    bool                flag_dr_dirty;
    bool                debug_state_latch;
    bool                single_step;

    struct hvm_vcpu_asid n1asid; /* ASID for the normal (n1) context */

    u32                 msr_tsc_aux;    /* guest MSR_TSC_AUX value */
    u64                 msr_tsc_adjust; /* guest MSR_TSC_ADJUST value */
    u64                 msr_xss;        /* guest MSR_XSS value */

    /* Vendor-specific (VMX or SVM) per-vCPU state. */
    union {
        struct arch_vmx_struct vmx;
        struct arch_svm_struct svm;
    } u;

    /* Deferred assertion of the event-channel upcall interrupt. */
    struct tasklet      assert_evtchn_irq_tasklet;

    struct nestedvcpu   nvcpu; /* nested-virtualization state */

    struct altp2mvcpu   avcpu; /* alternate-p2m state */

    struct mtrr_state   mtrr;   /* guest MTRR state */
    u64                 pat_cr; /* guest PAT MSR value */

    /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */
    int64_t             stime_offset;

    u8                  evtchn_upcall_vector; /* vector for event-channel
                                               * upcalls to this vCPU */

    /* Which cache mode is this VCPU in (CR0:CD/NW)? */
    u8                  cache_mode;

    struct hvm_vcpu_io  hvm_io; /* emulated I/O state (see above) */

    /* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
    void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
    void *fpu_exception_callback_arg;

    /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
    struct x86_event     inject_event;

    struct viridian_vcpu viridian; /* Viridian (Hyper-V enlightenment) state */
};
210  
211  #endif /* __ASM_X86_HVM_VCPU_H__ */
212  
213  /*
214   * Local variables:
215   * mode: C
216   * c-file-style: "BSD"
217   * c-basic-offset: 4
218   * tab-width: 4
219   * indent-tabs-mode: nil
220   * End:
221   */
222