#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/cache.h>
#include <xen/timer.h>
#include <asm/page.h>
#include <asm/p2m.h>
#include <asm/vfp.h>
#include <asm/mmio.h>
#include <asm/gic.h>
#include <asm/vgic.h>
#include <asm/vpl011.h>
#include <public/hvm/params.h>

struct hvm_domain
{
    uint64_t params[HVM_NR_PARAMS];
};

#ifdef CONFIG_ARM_64
enum domain_type {
    DOMAIN_32BIT,
    DOMAIN_64BIT,
};
#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT)
#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT)
#else
#define is_32bit_domain(d) (1)
#define is_64bit_domain(d) (0)
#endif

/*
 * Is the domain using the host memory layout?
 *
 * A direct-mapped domain will always have its RAM mapped with GFN == MFN.
 * To avoid any trouble finding free space, it is easier to force the use
 * of the host memory layout.
 *
 * The hardware domain will use the host layout regardless of whether it
 * is direct-mapped, because some OSes may rely on specific address
 * ranges for their devices.
 */
#define domain_use_host_layout(d) (is_domain_direct_mapped(d) || \
                                   is_hardware_domain(d))
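
/*
 * Illustrative (hypothetical) use: a caller choosing the guest address
 * map based on this predicate. host_gicd_base is a made-up variable;
 * GUEST_GICD_BASE is the fixed-layout base from the public headers.
 *
 *     paddr_t base = domain_use_host_layout(d) ? host_gicd_base
 *                                              : GUEST_GICD_BASE;
 */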

struct vtimer {
    struct vcpu *v;
    int irq;
    struct timer timer;
    register_t ctl;
    uint64_t cval;
};

struct paging_domain {
    spinlock_t lock;
    /* Free P2M pages from the pre-allocated P2M pool */
    struct page_list_head p2m_freelist;
    /* Number of pages from the pre-allocated P2M pool */
    unsigned long p2m_total_pages;
};

struct arch_domain
{
#ifdef CONFIG_ARM_64
    enum domain_type type;
#endif

#ifdef CONFIG_ARM64_SVE
    /* max SVE encoded vector length */
    uint8_t sve_vl;
#endif

    /* Virtual MMU */
    struct p2m_domain p2m;

    struct hvm_domain hvm;

#ifdef CONFIG_ARCH_PAGING_MEMPOOL
    struct paging_domain paging;
#endif

    struct vmmio vmmio;

    /* Progress marker for the continuable domain_relinquish_resources(). */
    unsigned int rel_priv;

    struct {
        uint64_t offset;
        s_time_t nanoseconds;
    } virt_timer_base;

    struct vgic_dist vgic;

#ifdef CONFIG_HWDOM_VUART
    struct vuart {
#define VUART_BUF_SIZE 128
        char *buf;
        int idx;
        const struct vuart_info *info;
        spinlock_t lock;
    } vuart;
#endif

    unsigned int evtchn_irq;
#ifdef CONFIG_ACPI
    void *efi_acpi_table;
    paddr_t efi_acpi_gpa;
    paddr_t efi_acpi_len;
#endif

    /* Monitor options */
    struct {
        uint8_t privileged_call_enabled : 1;
    } monitor;

#ifdef CONFIG_SBSA_VUART_CONSOLE
    struct vpl011 vpl011;
#endif

#ifdef CONFIG_TEE
    void *tee;
#endif

} __cacheline_aligned;

struct arch_vcpu
{
    struct {
#ifdef CONFIG_ARM_32
        register_t r4;
        register_t r5;
        register_t r6;
        register_t r7;
        register_t r8;
        register_t r9;
        register_t sl;
#else
        register_t x19;
        register_t x20;
        register_t x21;
        register_t x22;
        register_t x23;
        register_t x24;
        register_t x25;
        register_t x26;
        register_t x27;
        register_t x28;
#endif
        register_t fp;
        register_t sp;
        register_t pc;
    } saved_context;

    void *stack;

    /*
     * Points into ->stack; more convenient than doing the pointer
     * arithmetic all the time.
     */
    struct cpu_info *cpu_info;

    /* Fault Status */
#ifdef CONFIG_ARM_32
    uint32_t dfsr;
    uint32_t dfar, ifar;
#else
    uint64_t far;
    uint32_t esr;
#endif

    uint32_t ifsr; /* 32-bit guests only */
    uint32_t afsr0, afsr1;

    /* MMU */
    register_t vbar;
    register_t ttbcr;
    uint64_t ttbr0, ttbr1;

    uint32_t dacr; /* 32-bit guests only */
    uint64_t par;
#ifdef CONFIG_ARM_32
    uint32_t mair0, mair1;
    uint32_t amair0, amair1;
#else
    uint64_t mair;
    uint64_t amair;
#endif

    /* Control Registers */
    register_t sctlr;
    register_t actlr;
    uint32_t cpacr;

    uint32_t contextidr;
    register_t tpidr_el0;
    register_t tpidr_el1;
    register_t tpidrro_el0;

    /* HYP configuration */
#ifdef CONFIG_ARM64_SVE
    register_t zcr_el1;
    register_t zcr_el2;
#endif

    register_t cptr_el2;
    register_t hcr_el2;
    register_t mdcr_el2;

    uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
#ifdef CONFIG_ARM_32
    /*
     * ARMv8 only supports a trivial implementation of Jazelle when in
     * AArch32 mode and therefore has no extended control registers.
     */
    uint32_t joscr, jmcr;
#endif

    /* Floating point */
    struct vfp_state vfp;

    /* CP 15 */
    uint32_t csselr;
    register_t vmpidr;

    /* Holds GIC context data */
    union gic_state_data gic;
    uint64_t lr_mask;

    struct vgic_cpu vgic;

    /* Timer registers */
    register_t cntkctl;

    struct vtimer phys_timer;
    struct vtimer virt_timer;
    bool vtimer_initialized;

    /*
     * The full P2M may require some cleaning (e.g. when emulating
     * set/way cache maintenance operations). As the action can take a
     * long time, it requires preemption. It is deferred until we return
     * to the guest, where we can more easily check for softirqs and
     * preempt the vCPU safely.
     */
    bool need_flush_to_ram;

} __cacheline_aligned;

void vcpu_show_registers(struct vcpu *v);
void vcpu_switch_to_aarch64_mode(struct vcpu *v);

/*
 * Due to a GICv3 restriction, the number of vCPUs in AFF0 is limited to
 * 16, so only the first 4 bits of AFF0 are legal. We use the first two
 * affinity levels here, which expands the number of vCPUs to up to
 * 4096 (== 16 * 256), more than the number of PEs a GIC-500 supports.
 *
 * Since we don't currently store the vCPU's topology (affinity) in the
 * vMPIDR, we map vcpuid to vMPIDR linearly; see the worked examples
 * after each helper below.
 */
static inline unsigned int vaffinity_to_vcpuid(register_t vaff)
{
    unsigned int vcpuid;

    vaff &= MPIDR_HWID_MASK;

    vcpuid = MPIDR_AFFINITY_LEVEL(vaff, 0);
    vcpuid |= MPIDR_AFFINITY_LEVEL(vaff, 1) << 4;

    return vcpuid;
}
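
/*
 * Worked example (illustrative only): a vMPIDR of 0x0105 decodes as
 * AFF0 = 5 and AFF1 = 1, so vaffinity_to_vcpuid(0x0105) returns
 * (1 << 4) | 5 == 21.
 */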

static inline register_t vcpuid_to_vaffinity(unsigned int vcpuid)
{
    register_t vaff;

    /*
     * Right now only AFF0 and AFF1 are supported in virtual affinity.
     * Since only the first 4 bits in AFF0 are used in GICv3, only
     * 12 bits (4 + 8) are available in total.
     */
    BUILD_BUG_ON(MAX_VIRT_CPUS >= (1 << 12));

    vaff = (vcpuid & 0x0f) << MPIDR_LEVEL_SHIFT(0);
    vaff |= ((vcpuid >> 4) & MPIDR_LEVEL_MASK) << MPIDR_LEVEL_SHIFT(1);

    return vaff;
}
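
/*
 * Round-trip sketch: vcpuid 21 splits as AFF0 = (21 & 0xf) = 5 and
 * AFF1 = (21 >> 4) = 1, so vcpuid_to_vaffinity(21) yields 0x0105, and
 * vaffinity_to_vcpuid(0x0105) gives back 21.
 */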

static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
{
    return xmalloc(struct vcpu_guest_context);
}

static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
{
    xfree(vgc);
}
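
/*
 * Sketch of a caller pairing the two helpers above (error handling and
 * the actual context setup elided; not a real code path):
 *
 *     struct vcpu_guest_context *ctxt = alloc_vcpu_guest_context();
 *
 *     if ( ctxt != NULL )
 *     {
 *         ... fill in and apply *ctxt ...
 *         free_vcpu_guest_context(ctxt);
 *     }
 */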

static inline void arch_vcpu_block(struct vcpu *v) {}

#define arch_vm_assist_valid_mask(d) (1UL << VMASST_TYPE_runstate_update_flag)

/* vPCI is not available on Arm */
#define has_vpci(d) ({ (void)(d); false; })

struct arch_vcpu_io {
    struct instr_details dabt_instr; /* when the instruction is decoded */
};

struct guest_memory_policy {};
static inline void update_guest_memory_policy(struct vcpu *v,
                                              struct guest_memory_policy *gmp)
{}

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */