1 /*
2  * domain.h: HVM per domain definitions
3  *
4  * Copyright (c) 2004, Intel Corporation.
5  * Copyright (c) 2005, International Business Machines Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program; If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef __ASM_X86_HVM_DOMAIN_H__
21 #define __ASM_X86_HVM_DOMAIN_H__
22 
23 #include <xen/iommu.h>
24 #include <asm/hvm/irq.h>
25 #include <asm/hvm/vpt.h>
26 #include <asm/hvm/vlapic.h>
27 #include <asm/hvm/vioapic.h>
28 #include <asm/hvm/io.h>
29 #include <asm/hvm/viridian.h>
30 #include <asm/hvm/vmx/vmcs.h>
31 #include <asm/hvm/svm/vmcb.h>
32 #include <public/grant_table.h>
33 #include <public/hvm/params.h>
34 #include <public/hvm/save.h>
35 #include <public/hvm/hvm_op.h>
36 #include <public/hvm/dm_op.h>
37 
/*
 * A guest page shared with an emulator, used to exchange ioreq
 * structures (both the synchronous and the buffered rings).
 */
struct hvm_ioreq_page {
    unsigned long gfn;        /* Guest frame number of the shared page */
    struct page_info *page;   /* Backing page */
    void *va;                 /* Xen virtual address of the mapping */
};
43 
/* Per-vcpu state kept by an ioreq server for ioreq delivery. */
struct hvm_ioreq_vcpu {
    struct list_head list_entry;  /* Link in hvm_ioreq_server.ioreq_vcpu_list */
    struct vcpu      *vcpu;       /* The vcpu this entry describes */
    evtchn_port_t    ioreq_evtchn; /* Event channel for notifying the emulator */
    bool             pending;     /* An ioreq for this vcpu is in flight */
};
50 
/*
 * One rangeset per XEN_DMOP_IO_RANGE_* type (PCI is the highest-valued
 * type, hence the "+ 1"), each capped at MAX_NR_IO_RANGES entries.
 */
#define NR_IO_RANGE_TYPES (XEN_DMOP_IO_RANGE_PCI + 1)
#define MAX_NR_IO_RANGES  256
53 
/*
 * State of a single ioreq server: an emulator (identified by domid)
 * that claims ranges of I/O and handles the resulting ioreqs via the
 * shared ioreq pages and event channels.
 */
struct hvm_ioreq_server {
    struct list_head       list_entry;   /* Link in hvm_domain.ioreq_server.list */
    struct domain          *domain;      /* Domain being emulated */

    /* Lock to serialize toolstack modifications */
    spinlock_t             lock;

    /* Domain id of emulating domain */
    domid_t                domid;
    ioservid_t             id;           /* Server id, unique within the domain */
    struct hvm_ioreq_page  ioreq;        /* Synchronous ioreq page */
    struct list_head       ioreq_vcpu_list; /* List of hvm_ioreq_vcpu entries */
    struct hvm_ioreq_page  bufioreq;     /* Buffered ioreq ring page */

    /* Lock to serialize access to buffered ioreq ring */
    spinlock_t             bufioreq_lock;
    evtchn_port_t          bufioreq_evtchn; /* Notification channel for the ring */
    struct rangeset        *range[NR_IO_RANGE_TYPES]; /* Claimed I/O ranges, per type */
    bool                   enabled;      /* Server is accepting ioreqs */
    bool                   bufioreq_atomic; /* presumably selects atomic ring handling
                                             * (HVM_IOREQSRV_BUFIOREQ_*) — TODO confirm */
};
75 
76 /*
77  * This structure defines function hooks to support hardware-assisted
78  * virtual interrupt delivery to guest. (e.g. VMX PI and SVM AVIC).
79  *
80  * These hooks are defined by the underlying arch-specific code
81  * as needed. For example:
82  *   - When the domain is enabled with virtual IPI delivery
83  *   - When the domain is enabled with virtual I/O int delivery
84  *     and actually has a physical device assigned .
85  */
86 struct hvm_pi_ops {
87     /* Hook into ctx_switch_from. */
88     void (*switch_from)(struct vcpu *v);
89 
90     /* Hook into ctx_switch_to. */
91     void (*switch_to)(struct vcpu *v);
92 
93     /*
94      * Hook into arch_vcpu_block(), which is called
95      * from vcpu_block() and vcpu_do_poll().
96      */
97     void (*vcpu_block)(struct vcpu *);
98 
99     /* Hook into the vmentry path. */
100     void (*do_resume)(struct vcpu *v);
101 };
102 
/* Per-domain state for an HVM guest. */
struct hvm_domain {
    /* Guest page range used for non-default ioreq servers */
    struct {
        unsigned long base;
        unsigned long mask;
    } ioreq_gfn;

    /* Lock protects all other values in the sub-struct and the default */
    struct {
        spinlock_t       lock;
        ioservid_t       id;   /* Next server id to allocate */
        struct list_head list; /* List of hvm_ioreq_server */
    } ioreq_server;
    struct hvm_ioreq_server *default_ioreq_server;

    /* Cached CF8 for guest PCI config cycles */
    uint32_t                pci_cf8;

    /* Platform time state (virtual timers); see asm/hvm/vpt.h */
    struct pl_time         *pl_time;

    /* Registered internal I/O intercept handlers and their count */
    struct hvm_io_handler *io_handler;
    unsigned int          io_handler_count;

    /* Lock protects access to irq, vpic and vioapic. */
    spinlock_t             irq_lock;
    struct hvm_irq        *irq;
    struct hvm_hw_vpic     vpic[2]; /* 0=master; 1=slave */
    struct hvm_vioapic    **vioapic;
    unsigned int           nr_vioapics;
    struct hvm_hw_stdvga   stdvga;

    /*
     * hvm_hw_pmtimer is a publicly-visible name. We will defer renaming
     * it to the more appropriate hvm_hw_acpi until the expected
     * comprehensive rewrite of migration code, thus avoiding code churn
     * in public header files.
     * Internally, however, we will be using hvm_hw_acpi.
     */
#define hvm_hw_acpi hvm_hw_pmtimer
    struct hvm_hw_acpi     acpi;

    /* VCPU which is current target for 8259 interrupts. */
    struct vcpu           *i8259_target;

    /* emulated irq to pirq */
    struct radix_tree_root emuirq_pirq;

    /* presumably indexed by HVM_PARAM_* (public/hvm/params.h) — TODO confirm */
    uint64_t              *params;

    /* Memory ranges with pinned cache attributes. */
    struct list_head       pinned_cacheattr_ranges;

    /* VRAM dirty support.  Protect with the domain paging lock. */
    struct sh_dirty_vram *dirty_vram;

    /* If one of vcpus of this domain is in no_fill_mode or
     * mtrr/pat between vcpus is not the same, set is_in_uc_mode
     */
    spinlock_t             uc_lock;
    bool_t                 is_in_uc_mode;

    /* hypervisor intercepted msix table */
    struct list_head       msixtbl_list;

    /* Viridian (Hyper-V) enlightenment state; see asm/hvm/viridian.h */
    struct viridian_domain viridian;

    bool_t                 hap_enabled;          /* Hardware-assisted paging in use */
    bool_t                 mem_sharing_enabled;
    bool_t                 qemu_mapcache_invalidate;
    bool_t                 is_s3_suspended;

    /*
     * TSC value that VCPUs use to calculate their tsc_offset value.
     * Used during initialization and save/restore.
     */
    uint64_t sync_tsc;

    /* NOTE(review): looks like the hardware TSC scaling multiplier — confirm */
    uint64_t tsc_scaling_ratio;

    /* I/O port access bitmap — assumed guest-wide default; TODO confirm */
    unsigned long *io_bitmap;

    /* List of guest to machine IO ports mapping. */
    struct list_head g2m_ioport_list;

    /* List of permanently write-mapped pages. */
    struct {
        spinlock_t lock;
        struct list_head list;
    } write_map;

    /* Posted-interrupt / AVIC hooks; see struct hvm_pi_ops above */
    struct hvm_pi_ops pi_ops;

    /* Vendor-specific (VMX or SVM) per-domain state */
    union {
        struct vmx_domain vmx;
        struct svm_domain svm;
    };
};
200 
/* True iff hardware-assisted paging is enabled for domain @d. */
#define hap_enabled(d)  ((d)->arch.hvm_domain.hap_enabled)
202 
203 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
204 
205 /*
206  * Local variables:
207  * mode: C
208  * c-file-style: "BSD"
209  * c-basic-offset: 4
210  * tab-width: 4
211  * indent-tabs-mode: nil
212  * End:
213  */
214