/* SPDX-License-Identifier: MIT */
/*
 * Structure definitions for HVM state that is held by Xen and must
 * be saved along with the domain's memory and device-model state.
 *
 * Copyright (c) 2007 XenSource Ltd.
 */

#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
#define __XEN_PUBLIC_HVM_SAVE_X86_H__

#include "../../xen.h"

/*
 * Save/restore header: general info about the save file.
 */

#define HVM_FILE_MAGIC   0x54381286
#define HVM_FILE_VERSION 0x00000001

struct hvm_save_header {
    uint32_t magic;     /* Must be HVM_FILE_MAGIC */
    uint32_t version;   /* File format version */
    uint64_t changeset; /* Version of Xen that saved this file */
    uint32_t cpuid;     /* CPUID[0x01][%eax] on the saving machine */
    uint32_t gtsc_khz;  /* Guest's TSC frequency in kHz */
};

DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
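
/*
 * Illustrative sketch only, not part of the saved-state format: a consumer
 * of a save image might sanity-check the leading header along these lines.
 * The helper name is an example, not an existing Xen interface.
 */
static inline int _hvm_save_header_ok_example(const struct hvm_save_header *h)
{
    return h->magic == HVM_FILE_MAGIC && h->version == HVM_FILE_VERSION;
}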


/*
 * Processor
 *
 * Compat:
 *  - Pre-3.4 didn't have msr_tsc_aux
 *  - Pre-4.7 didn't have fpu_initialised
 */

struct hvm_hw_cpu {
    uint8_t  fpu_regs[512];

    uint64_t rax;
    uint64_t rbx;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t rsp;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rip;
    uint64_t rflags;

    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;

    uint64_t dr0;
    uint64_t dr1;
    uint64_t dr2;
    uint64_t dr3;
    uint64_t dr6;
    uint64_t dr7;

    uint32_t cs_sel;
    uint32_t ds_sel;
    uint32_t es_sel;
    uint32_t fs_sel;
    uint32_t gs_sel;
    uint32_t ss_sel;
    uint32_t tr_sel;
    uint32_t ldtr_sel;

    uint32_t cs_limit;
    uint32_t ds_limit;
    uint32_t es_limit;
    uint32_t fs_limit;
    uint32_t gs_limit;
    uint32_t ss_limit;
    uint32_t tr_limit;
    uint32_t ldtr_limit;
    uint32_t idtr_limit;
    uint32_t gdtr_limit;

    uint64_t cs_base;
    uint64_t ds_base;
    uint64_t es_base;
    uint64_t fs_base;
    uint64_t gs_base;
    uint64_t ss_base;
    uint64_t tr_base;
    uint64_t ldtr_base;
    uint64_t idtr_base;
    uint64_t gdtr_base;

    uint32_t cs_arbytes;
    uint32_t ds_arbytes;
    uint32_t es_arbytes;
    uint32_t fs_arbytes;
    uint32_t gs_arbytes;
    uint32_t ss_arbytes;
    uint32_t tr_arbytes;
    uint32_t ldtr_arbytes;

    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;

    /* msr for em64t */
    uint64_t shadow_gs;

    /* msr content saved/restored. */
    uint64_t msr_flags; /* Obsolete, ignored. */
    uint64_t msr_lstar;
    uint64_t msr_star;
    uint64_t msr_cstar;
    uint64_t msr_syscall_mask;
    uint64_t msr_efer;
    uint64_t msr_tsc_aux;

    /* guest's idea of what rdtsc() would return */
    uint64_t tsc;

    /* pending event, if any */
    union {
        uint32_t pending_event;
        struct {
            uint8_t  pending_vector:8;
            uint8_t  pending_type:3;
            uint8_t  pending_error_valid:1;
            uint32_t pending_reserved:19;
            uint8_t  pending_valid:1;
        };
    };
    /* error code for pending event */
    uint32_t error_code;

#define _XEN_X86_FPU_INITIALISED 0
#define XEN_X86_FPU_INITIALISED  (1U<<_XEN_X86_FPU_INITIALISED)
    uint32_t flags;
    uint32_t pad0;
};
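
/*
 * Illustrative sketch only: filling in the pending-event encoding above,
 * e.g. from a tool that edits a saved CPU record.  The helper name is an
 * example, not an existing Xen interface.
 */
static inline void _hvm_hw_cpu_set_pending_event_example(
    struct hvm_hw_cpu *c, uint8_t vector, uint8_t type, uint32_t ec)
{
    c->pending_event = 0;
    c->pending_vector = vector;
    c->pending_type = type & 7;   /* 3-bit event type field */
    c->pending_error_valid = 1;
    c->pending_valid = 1;
    c->error_code = ec;           /* only meaningful when error_valid is set */
}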

struct hvm_hw_cpu_compat {
    uint8_t  fpu_regs[512];

    uint64_t rax;
    uint64_t rbx;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t rsp;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rip;
    uint64_t rflags;

    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;

    uint64_t dr0;
    uint64_t dr1;
    uint64_t dr2;
    uint64_t dr3;
    uint64_t dr6;
    uint64_t dr7;

    uint32_t cs_sel;
    uint32_t ds_sel;
    uint32_t es_sel;
    uint32_t fs_sel;
    uint32_t gs_sel;
    uint32_t ss_sel;
    uint32_t tr_sel;
    uint32_t ldtr_sel;

    uint32_t cs_limit;
    uint32_t ds_limit;
    uint32_t es_limit;
    uint32_t fs_limit;
    uint32_t gs_limit;
    uint32_t ss_limit;
    uint32_t tr_limit;
    uint32_t ldtr_limit;
    uint32_t idtr_limit;
    uint32_t gdtr_limit;

    uint64_t cs_base;
    uint64_t ds_base;
    uint64_t es_base;
    uint64_t fs_base;
    uint64_t gs_base;
    uint64_t ss_base;
    uint64_t tr_base;
    uint64_t ldtr_base;
    uint64_t idtr_base;
    uint64_t gdtr_base;

    uint32_t cs_arbytes;
    uint32_t ds_arbytes;
    uint32_t es_arbytes;
    uint32_t fs_arbytes;
    uint32_t gs_arbytes;
    uint32_t ss_arbytes;
    uint32_t tr_arbytes;
    uint32_t ldtr_arbytes;

    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;

    /* msr for em64t */
    uint64_t shadow_gs;

    /* msr content saved/restored. */
    uint64_t msr_flags; /* Obsolete, ignored. */
    uint64_t msr_lstar;
    uint64_t msr_star;
    uint64_t msr_cstar;
    uint64_t msr_syscall_mask;
    uint64_t msr_efer;
    /*uint64_t msr_tsc_aux; COMPAT */

    /* guest's idea of what rdtsc() would return */
    uint64_t tsc;

    /* pending event, if any */
    union {
        uint32_t pending_event;
        struct {
            uint8_t  pending_vector:8;
            uint8_t  pending_type:3;
            uint8_t  pending_error_valid:1;
            uint32_t pending_reserved:19;
            uint8_t  pending_valid:1;
        };
    };
    /* error code for pending event */
    uint32_t error_code;
};

static inline int _hvm_hw_fix_cpu(void *h, uint32_t size) {

    union hvm_hw_cpu_union {
        struct hvm_hw_cpu nat;
        struct hvm_hw_cpu_compat cmp;
    } *ucpu = (union hvm_hw_cpu_union *)h;

    if ( size == sizeof(struct hvm_hw_cpu_compat) )
    {
        /*
         * If we copy from the end backwards, we should
         * be able to do the modification in-place.
         */
        ucpu->nat.error_code = ucpu->cmp.error_code;
        ucpu->nat.pending_event = ucpu->cmp.pending_event;
        ucpu->nat.tsc = ucpu->cmp.tsc;
        ucpu->nat.msr_tsc_aux = 0;
    }
    /* Mimic the old behaviour by unconditionally setting fpu_initialised. */
    ucpu->nat.flags = XEN_X86_FPU_INITIALISED;

    return 0;
}

DECLARE_HVM_SAVE_TYPE_COMPAT(CPU, 2, struct hvm_hw_cpu, \
                             struct hvm_hw_cpu_compat, _hvm_hw_fix_cpu);
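
/*
 * Illustrative sketch only: DECLARE_HVM_SAVE_TYPE_COMPAT above wires the
 * fixup into Xen's own load path, but a standalone consumer of a save image
 * could apply it directly when it meets a short (pre-3.4) CPU record.  The
 * function name is an example, not an existing Xen interface.
 */
static inline int _hvm_hw_load_cpu_example(void *blob, uint32_t blob_size,
                                           struct hvm_hw_cpu *out)
{
    /*
     * NB: even a compat-sized record must sit in a buffer of at least
     * sizeof(struct hvm_hw_cpu) bytes, since the fixup expands it in place.
     */
    if ( blob_size == sizeof(struct hvm_hw_cpu_compat) )
    {
        if ( _hvm_hw_fix_cpu(blob, blob_size) )
            return -1;
    }
    else if ( blob_size != sizeof(struct hvm_hw_cpu) )
        return -1;

    *out = *(struct hvm_hw_cpu *)blob;

    return 0;
}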

/*
 * PIC
 */

struct hvm_hw_vpic {
    /* IR line bitmasks. */
    uint8_t irr;
    uint8_t imr;
    uint8_t isr;

    /* Line IRx maps to IRQ irq_base+x */
    uint8_t irq_base;

    /*
     * Where are we in ICW2-4 initialisation (0 means no init in progress)?
     * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
     * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence)
     * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
     */
    uint8_t init_state:4;

    /* IR line with highest priority. */
    uint8_t priority_add:4;

    /* Reads from A=0 obtain ISR or IRR? */
    uint8_t readsel_isr:1;

    /* Reads perform a polling read? */
    uint8_t poll:1;

    /* Automatically clear IRQs from the ISR during INTA? */
    uint8_t auto_eoi:1;

    /* Automatically rotate IRQ priorities during AEOI? */
    uint8_t rotate_on_auto_eoi:1;

    /* Exclude slave inputs when considering in-service IRQs? */
    uint8_t special_fully_nested_mode:1;

    /* Special mask mode excludes masked IRs from AEOI and priority checks. */
    uint8_t special_mask_mode:1;

    /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
    uint8_t is_master:1;

    /* Edge/trigger selection. */
    uint8_t elcr;

    /* Virtual INT output. */
    uint8_t int_output;
};

DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
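
/*
 * Illustrative sketch only: decoding the init_state nibble described above.
 * The helper names are examples, not existing Xen interfaces.
 */
static inline int _vpic_init_in_progress_example(const struct hvm_hw_vpic *v)
{
    return v->init_state != 0;
}

static inline unsigned int _vpic_next_icw_example(const struct hvm_hw_vpic *v)
{
    /* Bits 0-1 hold x; the next write at A=1 sets ICW(x+1). */
    return (v->init_state & 3) + 1;
}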


/*
 * IO-APIC
 */

union vioapic_redir_entry
{
    uint64_t bits;
    struct {
        uint8_t vector;
        uint8_t delivery_mode:3;
        uint8_t dest_mode:1;
        uint8_t delivery_status:1;
        uint8_t polarity:1;
        uint8_t remote_irr:1;
        uint8_t trig_mode:1;
        uint8_t mask:1;
        uint8_t reserve:7;
        uint8_t reserved[4];
        uint8_t dest_id;
    } fields;
};

#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */

#define XEN_HVM_VIOAPIC(name, cnt)                  \
    struct name {                                   \
        uint64_t base_address;                      \
        uint32_t ioregsel;                          \
        uint32_t id;                                \
        union vioapic_redir_entry redirtbl[cnt];    \
    }

XEN_HVM_VIOAPIC(hvm_hw_vioapic, VIOAPIC_NUM_PINS);

#ifndef __XEN__
#undef XEN_HVM_VIOAPIC
#else
#undef VIOAPIC_NUM_PINS
#endif

DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);


/*
 * LAPIC
 */

struct hvm_hw_lapic {
    uint64_t apic_base_msr;
    uint32_t disabled; /* VLAPIC_xx_DISABLED */
    uint32_t timer_divisor;
    uint64_t tdt_msr;
    uint32_t pending_esr;
};

DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);

struct hvm_hw_lapic_regs {
    uint8_t data[1024];
};

DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);


/*
 * IRQs
 */

struct hvm_hw_pci_irqs {
    /*
     * Virtual interrupt wires for a single PCI bus.
     * Indexed by: device*4 + INTx#.
     */
    union {
        unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */
        uint64_t pad[2];
    };
};

DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
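
/*
 * Illustrative sketch only: testing one virtual PCI interrupt wire via the
 * "device*4 + INTx#" indexing described above.  The helper name is an
 * example, not an existing Xen interface.
 */
static inline int _hvm_pci_intx_asserted_example(
    const struct hvm_hw_pci_irqs *irqs, unsigned int device, unsigned int intx)
{
    unsigned int wire = device * 4 + intx;          /* device 0-31, INTx 0-3 */
    unsigned int bits = sizeof(unsigned long) * 8;

    return (irqs->i[wire / bits] >> (wire % bits)) & 1;
}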

struct hvm_hw_isa_irqs {
    /*
     * Virtual interrupt wires for ISA devices.
     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
     */
    union {
        unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */
        uint64_t pad[1];
    };
};

DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);

struct hvm_hw_pci_link {
    /*
     * PCI-ISA interrupt router.
     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
     * The router provides a programmable mapping from each link to a GSI.
     */
    uint8_t route[4];
    uint8_t pad0[4];
};

DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
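
/*
 * Illustrative sketch only: resolving a device/INTx pair to a GSI through
 * the link router, following the 'barber's pole' mapping described above.
 * The helper name is an example, not an existing Xen interface.
 */
static inline uint8_t _hvm_pci_intx_gsi_example(
    const struct hvm_hw_pci_link *link, unsigned int device, unsigned int intx)
{
    return link->route[(device + intx) & 3];
}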

/*
 * PIT
 */

struct hvm_hw_pit {
    struct hvm_hw_pit_channel {
        uint32_t count; /* can be 65536 */
        uint16_t latched_count;
        uint8_t count_latched;
        uint8_t status_latched;
        uint8_t status;
        uint8_t read_state;
        uint8_t write_state;
        uint8_t write_latch;
        uint8_t rw_mode;
        uint8_t mode;
        uint8_t bcd; /* not supported */
        uint8_t gate; /* timer start */
    } channels[3]; /* 3 x 16 bytes */
    uint32_t speaker_data_on;
    uint32_t pad0;
};

DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);


/*
 * RTC
 */

#define RTC_CMOS_SIZE 14
struct hvm_hw_rtc {
    /* CMOS bytes */
    uint8_t cmos_data[RTC_CMOS_SIZE];
    /* Index register for 2-part operations */
    uint8_t cmos_index;
    uint8_t pad0;
    /* RTC offset from host time */
    int64_t rtc_offset;
};

DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);


/*
 * HPET
 */

#define HPET_TIMER_NUM 3 /* 3 timers supported now */
struct hvm_hw_hpet {
    /* Memory-mapped, software visible registers */
    uint64_t capability;    /* capabilities */
    uint64_t res0;          /* reserved */
    uint64_t config;        /* configuration */
    uint64_t res1;          /* reserved */
    uint64_t isr;           /* interrupt status reg */
    uint64_t res2[25];      /* reserved */
    uint64_t mc64;          /* main counter */
    uint64_t res3;          /* reserved */
    struct {                /* timers */
        uint64_t config;    /* configuration/cap */
        uint64_t cmp;       /* comparator */
        uint64_t fsb;       /* FSB route, not supported now */
        uint64_t res4;      /* reserved */
    } timers[HPET_TIMER_NUM];
    uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */

    /* Hidden register state */
    uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
};

DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);


/*
 * PM timer
 */

struct hvm_hw_pmtimer {
    uint32_t tmr_val;  /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
    uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */
    uint16_t pm1a_en;  /* PM1a_EVT_BLK.PM1a_EN: enable register */
};

DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);

/*
 * MTRR MSRs
 */

struct hvm_hw_mtrr {
#define MTRR_VCNT 8
#define NUM_FIXED_MSR 11
    uint64_t msr_pat_cr;
    /* mtrr physbase & physmask msr pair */
    uint64_t msr_mtrr_var[MTRR_VCNT*2];
    uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
    uint64_t msr_mtrr_cap;
    uint64_t msr_mtrr_def_type;
};

DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
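
/*
 * Illustrative sketch only: the variable-range MTRRs above are stored as
 * PHYSBASE/PHYSMASK pairs, so range i occupies msr_mtrr_var[i*2] and
 * msr_mtrr_var[i*2 + 1].  The helper names are examples, not existing Xen
 * interfaces, and the base-then-mask ordering is inferred from the comment
 * above.
 */
static inline uint64_t _hvm_mtrr_physbase_example(const struct hvm_hw_mtrr *m,
                                                  unsigned int range)
{
    return m->msr_mtrr_var[range * 2];
}

static inline uint64_t _hvm_mtrr_physmask_example(const struct hvm_hw_mtrr *m,
                                                  unsigned int range)
{
    return m->msr_mtrr_var[range * 2 + 1];
}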

/*
 * The save area of XSAVE/XRSTOR.
 */

struct hvm_hw_cpu_xsave {
    uint64_t xfeature_mask;     /* Ignored */
    uint64_t xcr0;              /* Updated by XSETBV */
    uint64_t xcr0_accum;        /* Updated by XSETBV */
    struct {
        struct { char x[512]; } fpu_sse;

        struct hvm_hw_cpu_xsave_hdr {
            uint64_t xstate_bv; /* Updated by XRSTOR */
            uint64_t xcomp_bv;  /* Updated by XRSTOR{C,S} */
            uint64_t reserved[6];
        } xsave_hdr;            /* The 64-byte header */
    } save_area;
};

#define CPU_XSAVE_CODE 16

/*
 * Viridian hypervisor context.
 */

struct hvm_viridian_domain_context {
    uint64_t hypercall_gpa;
    uint64_t guest_os_id;
    uint64_t time_ref_count;
    uint64_t reference_tsc;
};

DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);

struct hvm_viridian_vcpu_context {
    uint64_t vp_assist_msr;
    uint8_t  apic_assist_pending;
    uint8_t  _pad[7];
    uint64_t simp_msr;
    uint64_t sint_msr[16];
    uint64_t stimer_config_msr[4];
    uint64_t stimer_count_msr[4];
};

DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);

struct hvm_vmce_vcpu {
    uint64_t caps;
    uint64_t mci_ctl2_bank0;
    uint64_t mci_ctl2_bank1;
    uint64_t mcg_ext_ctl;
};

DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);

struct hvm_tsc_adjust {
    uint64_t tsc_adjust;
};

DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);


struct hvm_msr {
    uint32_t count;
    struct hvm_one_msr {
        uint32_t index;
        uint32_t _rsvd;
        uint64_t val;
    } msr[XEN_FLEX_ARRAY_DIM];
};

#define CPU_MSR_CODE 20
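
/*
 * Illustrative sketch only: a CPU_MSR_CODE record is variable length, sized
 * from its leading count.  The macro name is an example, not an existing Xen
 * interface, and it assumes offsetof() is available (e.g. via <stddef.h> or
 * an equivalent definition).
 */
#define HVM_MSR_RECORD_SIZE_EXAMPLE(cnt) \
    (offsetof(struct hvm_msr, msr) + (cnt) * sizeof(struct hvm_one_msr))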

/* Range 22 - 34 (inclusive) reserved for Amazon */

/*
 * Largest type-code in use
 */
#define HVM_SAVE_CODE_MAX 20

#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */