#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/cache.h>
#include <xen/sched.h>
#include <xen/radix-tree.h>     /* struct radix_tree_root (pend_lpi_tree) */
#include <xen/rwlock.h>         /* rwlock_t */
#include <asm/page.h>
#include <asm/p2m.h>
#include <asm/vfp.h>
#include <asm/mmio.h>
#include <asm/gic.h>
#include <public/hvm/params.h>
#include <xen/serial.h>
#include <xen/rbtree.h>
#include <asm/vpl011.h>

struct hvm_domain
{
    uint64_t              params[HVM_NR_PARAMS];
};
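
/*
 * The parameters above are read and written via the HVMOP_{get,set}_param
 * hypercalls; see public/hvm/params.h for the HVM_PARAM_* space. On Arm
 * this array is the only piece of "HVM" state a domain carries.
 */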

#ifdef CONFIG_ARM_64
enum domain_type {
    DOMAIN_32BIT,
    DOMAIN_64BIT,
};
#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT)
#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT)
#else
#define is_32bit_domain(d) (1)
#define is_64bit_domain(d) (0)
#endif

/* Non-zero iff dom0 is built with a 1:1 (gfn == mfn) memory mapping. */
extern int dom0_11_mapping;
#define is_domain_direct_mapped(d) ((d) == hardware_domain && dom0_11_mapping)

struct vtimer {
    struct vcpu *v;     /* vCPU this timer belongs to */
    int irq;            /* PPI injected when the timer fires */
    struct timer timer; /* Xen timer backing the emulation */
    uint32_t ctl;       /* Virtual CNT{P,V}_CTL register */
    uint64_t cval;      /* Virtual CNT{P,V}_CVAL compare value */
};

struct arch_domain
{
#ifdef CONFIG_ARM_64
    enum domain_type type;
#endif

    /* Virtual MMU */
    struct p2m_domain p2m;

    struct hvm_domain hvm_domain;

    struct vmmio vmmio;

    /* Continuable domain_relinquish_resources(). */
    enum {
        RELMEM_not_started,
        RELMEM_xen,
        RELMEM_page,
        RELMEM_mapping,
        RELMEM_done,
    } relmem;
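
    /*
     * relmem records how far the phased teardown in
     * domain_relinquish_resources() has progressed, so that a preempted
     * hypercall can be continued from the same phase instead of
     * restarting from scratch.
     */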

    /* Virtual CPUID */
    uint32_t vpidr;

    struct {
        uint64_t offset;
    } phys_timer_base;
    struct {
        uint64_t offset;
    } virt_timer_base;

    struct {
        /* Version of the vGIC */
        enum gic_version version;
        /* GIC HW version specific vGIC driver handler */
        const struct vgic_ops *handler;
        /*
         * Covers access to other members of this struct _except_ for
         * shared_irqs, where each member contains its own locking.
         *
         * If both classes of lock are required, this lock must be
         * taken first. If multiple rank locks are required (including
         * the per-vcpu private_irqs rank), they must be taken in
         * rank order.
         */
        spinlock_t lock;
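        /*
         * Illustrative nesting (a sketch, not a real call site), using
         * the rank helpers from asm/vgic.h:
         *
         *     spin_lock_irqsave(&d->arch.vgic.lock, flags);
         *     vgic_lock_rank(v, rank, rflags);
         *     ... update rank and distributor state ...
         *     vgic_unlock_rank(v, rank, rflags);
         *     spin_unlock_irqrestore(&d->arch.vgic.lock, flags);
         */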
        uint32_t ctlr;
        int nr_spis; /* Number of SPIs */
        unsigned long *allocated_irqs; /* bitmap of IRQs allocated */
        struct vgic_irq_rank *shared_irqs;
        /*
         * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
         * struct arch_vcpu.
         */
        struct pending_irq *pending_irqs;
        /* Base address for guest GIC */
        paddr_t dbase; /* Distributor base address */
#ifdef CONFIG_HAS_GICV3
        /* GIC V3 addressing */
        /* List of contiguous regions occupied by the redistributors */
        struct vgic_rdist_region {
            paddr_t base;                   /* Base address */
            paddr_t size;                   /* Size */
            unsigned int first_cpu;         /* First CPU handled */
        } *rdist_regions;
        int nr_regions;                     /* Number of rdist regions */
        uint32_t rdist_stride;              /* Re-Distributor stride */
        unsigned long nr_lpis;
        uint64_t rdist_propbase;
        struct rb_root its_devices;         /* Devices mapped to an ITS */
        spinlock_t its_devices_lock;        /* Protects the its_devices tree */
        struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */
        rwlock_t pend_lpi_tree_lock;        /* Protects the pend_lpi_tree */
        struct list_head vits_list;         /* List of virtual ITSes */
        unsigned int intid_bits;
        /*
         * TODO: if more booleans are added below, consider replacing
         * them with a single flags variable.
         */
        bool rdists_enabled;                /* Is any redistributor enabled? */
        bool has_its;
#endif
    } vgic;

    struct vuart {
#define VUART_BUF_SIZE 128
        char                        *buf;
        int                         idx;
        const struct vuart_info     *info;
        spinlock_t                  lock;
    } vuart;

    /* PPI used to notify the guest of pending event channels */
    unsigned int evtchn_irq;
#ifdef CONFIG_ACPI
    void *efi_acpi_table;
    paddr_t efi_acpi_gpa;
    paddr_t efi_acpi_len;
#endif

    /* Monitor options */
    struct {
        uint8_t privileged_call_enabled : 1;
    } monitor;

#ifdef CONFIG_SBSA_VUART_CONSOLE
    struct vpl011 vpl011;
#endif
} __cacheline_aligned;

struct arch_vcpu
{
    struct {
#ifdef CONFIG_ARM_32
        register_t r4;
        register_t r5;
        register_t r6;
        register_t r7;
        register_t r8;
        register_t r9;
        register_t sl;
#else
        register_t x19;
        register_t x20;
        register_t x21;
        register_t x22;
        register_t x23;
        register_t x24;
        register_t x25;
        register_t x26;
        register_t x27;
        register_t x28;
#endif
        register_t fp;
        register_t sp;
        register_t pc;
    } saved_context;

    void *stack;

    /*
     * Points into ->stack, more convenient than doing pointer arith
     * all the time.
     */
    struct cpu_info *cpu_info;

    /* Fault Status */
#ifdef CONFIG_ARM_32
    uint32_t dfsr;
    uint32_t dfar, ifar;
#else
    uint64_t far;
    uint32_t esr;
#endif

    uint32_t ifsr; /* 32-bit guests only */
    uint32_t afsr0, afsr1;

    /* MMU */
    register_t vbar;
    register_t ttbcr;
    uint64_t ttbr0, ttbr1;

    uint32_t dacr; /* 32-bit guests only */
    uint64_t par;
#ifdef CONFIG_ARM_32
    uint32_t mair0, mair1;
    uint32_t amair0, amair1;
#else
    uint64_t mair;
    uint64_t amair;
#endif

    /* Control Registers */
    uint32_t actlr, sctlr;
    uint32_t cpacr;

    uint32_t contextidr;
    register_t tpidr_el0;
    register_t tpidr_el1;
    register_t tpidrro_el0;

    /* HYP configuration */
    register_t hcr_el2;

    uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
#ifdef CONFIG_ARM_32
    /*
     * ARMv8 only supports a trivial implementation on Jazelle when in AArch32
     * mode and therefore has no extended control registers.
     */
    uint32_t joscr, jmcr;
#endif

    /* Floating point */
    struct vfp_state vfp;

    /* CP 15 */
    uint32_t csselr;
    register_t vmpidr;

    /* Holds gic context data */
    union gic_state_data gic;
    uint64_t lr_mask;

    struct {
        /*
         * SGIs and PPIs are per-VCPU, SPIs are domain global and in
         * struct arch_domain.
         */
        struct pending_irq pending_irqs[32];
        struct vgic_irq_rank *private_irqs;

        /*
         * This list is ordered by IRQ priority and it is used to keep
         * track of the IRQs that the VGIC injected into the guest.
         * Depending on the availability of LR registers, the IRQs might
         * actually be in an LR, and therefore injected into the guest,
         * or queued in gic.lr_pending.
         * As soon as an IRQ is EOI'd by the guest and removed from the
         * corresponding LR it is also removed from this list.
         */
        struct list_head inflight_irqs;
        /*
         * lr_pending is used to queue IRQs (struct pending_irq) that the
         * vgic tried to inject in the guest (calling gic_set_guest_irq)
         * but no LRs were available at the time.
         * As soon as an LR is freed we remove the first IRQ from this
         * list and write it to the LR register.
         * lr_pending is a subset of vgic.inflight_irqs.
         */
        struct list_head lr_pending;
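
        /*
         * Illustrative life cycle (a sketch based on the comments
         * above): vgic_vcpu_inject_irq() puts a pending_irq on
         * inflight_irqs; if no LR is free it is also queued on
         * lr_pending until an LR becomes available; once the guest
         * EOIs the interrupt, the entry leaves both the LR and
         * inflight_irqs.
         */
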
        spinlock_t lock;

        /* GICv3: redistributor base and flags for this vCPU */
        paddr_t rdist_base;
        uint64_t rdist_pendbase;
#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
#define VGIC_V3_LPIS_ENABLED    (1 << 1)
        uint8_t flags;
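        /*
         * VGIC_V3_RDIST_LAST mirrors GICR_TYPER.Last: it is set for the
         * vCPU whose redistributor is the final one in its
         * vgic_rdist_region, telling the guest where the region ends.
         */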
    } vgic;

    /* Timer registers */
    uint32_t cntkctl;

    struct vtimer phys_timer;
    struct vtimer virt_timer;
    bool   vtimer_initialized;
} __cacheline_aligned;

void vcpu_show_execution_state(struct vcpu *);
void vcpu_show_registers(const struct vcpu *);
void vcpu_switch_to_aarch64_mode(struct vcpu *);

unsigned int domain_max_vcpus(const struct domain *);

/*
 * Due to a restriction of GICv3, the number of vCPUs in AFF0 is
 * limited to 16, so only the first 4 bits of AFF0 are usable. We use
 * the first two affinity levels here, which allows up to 4096
 * (== 16 * 256) vCPUs, more than the number of PEs a GIC-500 supports.
 *
 * Since we don't store the vCPU's topology (affinity) in the vMPIDR
 * at the moment, we map vcpuid to vMPIDR linearly.
 */
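/*
 * Worked example of the linear mapping: vcpuid 21 (0b10101) has
 * AFF0 = 5 and AFF1 = 1, i.e. an affinity value of 0x105. The two
 * helpers below are exact inverses of each other over
 * [0, MAX_VIRT_CPUS).
 */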
static inline unsigned int vaffinity_to_vcpuid(register_t vaff)
{
    unsigned int vcpuid;

    vaff &= MPIDR_HWID_MASK;

    vcpuid = MPIDR_AFFINITY_LEVEL(vaff, 0);
    vcpuid |= MPIDR_AFFINITY_LEVEL(vaff, 1) << 4;

    return vcpuid;
}

static inline register_t vcpuid_to_vaffinity(unsigned int vcpuid)
{
    register_t vaff;

    /*
     * Right now only AFF0 and AFF1 are supported in virtual affinity.
     * Since only the first 4 bits in AFF0 are used in GICv3, the
     * available bits are 12 (4 + 8).
     */
    BUILD_BUG_ON(MAX_VIRT_CPUS >= (1 << 12));

    vaff = (vcpuid & 0x0f) << MPIDR_LEVEL_SHIFT(0);
    vaff |= ((vcpuid >> 4) & MPIDR_LEVEL_MASK) << MPIDR_LEVEL_SHIFT(1);

    return vaff;
}

static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
{
    return xmalloc(struct vcpu_guest_context);
}

static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
{
    xfree(vgc);
}
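
/*
 * Typical usage (an illustrative sketch, not a real call site; xmalloc()
 * can fail, so callers must check for NULL):
 *
 *     struct vcpu_guest_context *ctxt = alloc_vcpu_guest_context();
 *
 *     if ( ctxt == NULL )
 *         return -ENOMEM;
 *     ...
 *     free_vcpu_guest_context(ctxt);
 */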

/* vCPU blocking needs no architecture-specific work on Arm. */
static inline void arch_vcpu_block(struct vcpu *v) {}

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */