/*
 * Structure definitions for HVM state that is held by Xen and must
 * be saved along with the domain's memory and device-model state.
 *
 * Copyright (c) 2007 XenSource Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
#define __XEN_PUBLIC_HVM_SAVE_X86_H__

/*
 * Save/restore header: general info about the save file.
 */

#define HVM_FILE_MAGIC   0x54381286
#define HVM_FILE_VERSION 0x00000001

struct hvm_save_header {
    uint32_t magic;             /* Must be HVM_FILE_MAGIC */
    uint32_t version;           /* File format version */
    uint64_t changeset;         /* Version of Xen that saved this file */
    uint32_t cpuid;             /* CPUID[0x01][%eax] on the saving machine */
    uint32_t gtsc_khz;          /* Guest's TSC frequency in kHz */
};

DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
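
/*
 * Illustrative sketch (not part of the saved image format): a consumer of
 * a save image might sanity-check the header along these lines.  The
 * helper name and return convention are invented for this example.
 */
static inline int _hvm_hw_header_check(const struct hvm_save_header *hdr)
{
    /* Reject images with the wrong magic or an unknown format version. */
    return hdr->magic == HVM_FILE_MAGIC && hdr->version == HVM_FILE_VERSION;
}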


/*
 * Processor
 *
 * Compat:
 *  - Pre-3.4 didn't have msr_tsc_aux
 *  - Pre-4.7 didn't have fpu_initialised
 */

struct hvm_hw_cpu {
    uint8_t fpu_regs[512];

    uint64_t rax;
    uint64_t rbx;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t rsp;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rip;
    uint64_t rflags;

    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;

    uint64_t dr0;
    uint64_t dr1;
    uint64_t dr2;
    uint64_t dr3;
    uint64_t dr6;
    uint64_t dr7;

    uint32_t cs_sel;
    uint32_t ds_sel;
    uint32_t es_sel;
    uint32_t fs_sel;
    uint32_t gs_sel;
    uint32_t ss_sel;
    uint32_t tr_sel;
    uint32_t ldtr_sel;

    uint32_t cs_limit;
    uint32_t ds_limit;
    uint32_t es_limit;
    uint32_t fs_limit;
    uint32_t gs_limit;
    uint32_t ss_limit;
    uint32_t tr_limit;
    uint32_t ldtr_limit;
    uint32_t idtr_limit;
    uint32_t gdtr_limit;

    uint64_t cs_base;
    uint64_t ds_base;
    uint64_t es_base;
    uint64_t fs_base;
    uint64_t gs_base;
    uint64_t ss_base;
    uint64_t tr_base;
    uint64_t ldtr_base;
    uint64_t idtr_base;
    uint64_t gdtr_base;

    uint32_t cs_arbytes;
    uint32_t ds_arbytes;
    uint32_t es_arbytes;
    uint32_t fs_arbytes;
    uint32_t gs_arbytes;
    uint32_t ss_arbytes;
    uint32_t tr_arbytes;
    uint32_t ldtr_arbytes;

    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;

    /* msr for em64t */
    uint64_t shadow_gs;

    /* msr content saved/restored. */
    uint64_t msr_flags; /* Obsolete, ignored. */
    uint64_t msr_lstar;
    uint64_t msr_star;
    uint64_t msr_cstar;
    uint64_t msr_syscall_mask;
    uint64_t msr_efer;
    uint64_t msr_tsc_aux;

    /* guest's idea of what rdtsc() would return */
    uint64_t tsc;

    /* pending event, if any */
    union {
        uint32_t pending_event;
        struct {
            uint8_t  pending_vector:8;
            uint8_t  pending_type:3;
            uint8_t  pending_error_valid:1;
            uint32_t pending_reserved:19;
            uint8_t  pending_valid:1;
        };
    };
    /* error code for pending event */
    uint32_t error_code;

#define _XEN_X86_FPU_INITIALISED        0
#define XEN_X86_FPU_INITIALISED         (1U<<_XEN_X86_FPU_INITIALISED)
    uint32_t flags;
    uint32_t pad0;
};

struct hvm_hw_cpu_compat {
    uint8_t fpu_regs[512];

    uint64_t rax;
    uint64_t rbx;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t rsp;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rip;
    uint64_t rflags;

    uint64_t cr0;
    uint64_t cr2;
    uint64_t cr3;
    uint64_t cr4;

    uint64_t dr0;
    uint64_t dr1;
    uint64_t dr2;
    uint64_t dr3;
    uint64_t dr6;
    uint64_t dr7;

    uint32_t cs_sel;
    uint32_t ds_sel;
    uint32_t es_sel;
    uint32_t fs_sel;
    uint32_t gs_sel;
    uint32_t ss_sel;
    uint32_t tr_sel;
    uint32_t ldtr_sel;

    uint32_t cs_limit;
    uint32_t ds_limit;
    uint32_t es_limit;
    uint32_t fs_limit;
    uint32_t gs_limit;
    uint32_t ss_limit;
    uint32_t tr_limit;
    uint32_t ldtr_limit;
    uint32_t idtr_limit;
    uint32_t gdtr_limit;

    uint64_t cs_base;
    uint64_t ds_base;
    uint64_t es_base;
    uint64_t fs_base;
    uint64_t gs_base;
    uint64_t ss_base;
    uint64_t tr_base;
    uint64_t ldtr_base;
    uint64_t idtr_base;
    uint64_t gdtr_base;

    uint32_t cs_arbytes;
    uint32_t ds_arbytes;
    uint32_t es_arbytes;
    uint32_t fs_arbytes;
    uint32_t gs_arbytes;
    uint32_t ss_arbytes;
    uint32_t tr_arbytes;
    uint32_t ldtr_arbytes;

    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;

    /* msr for em64t */
    uint64_t shadow_gs;

    /* msr content saved/restored. */
    uint64_t msr_flags; /* Obsolete, ignored. */
    uint64_t msr_lstar;
    uint64_t msr_star;
    uint64_t msr_cstar;
    uint64_t msr_syscall_mask;
    uint64_t msr_efer;
    /*uint64_t msr_tsc_aux; COMPAT */

    /* guest's idea of what rdtsc() would return */
    uint64_t tsc;

    /* pending event, if any */
    union {
        uint32_t pending_event;
        struct {
            uint8_t  pending_vector:8;
            uint8_t  pending_type:3;
            uint8_t  pending_error_valid:1;
            uint32_t pending_reserved:19;
            uint8_t  pending_valid:1;
        };
    };
    /* error code for pending event */
    uint32_t error_code;
};

static inline int _hvm_hw_fix_cpu(void *h, uint32_t size) {

    union hvm_hw_cpu_union {
        struct hvm_hw_cpu nat;
        struct hvm_hw_cpu_compat cmp;
    } *ucpu = (union hvm_hw_cpu_union *)h;

    if ( size == sizeof(struct hvm_hw_cpu_compat) )
    {
        /*
         * Copy from the end backwards so the conversion can be done
         * in-place: every field lands at an equal or higher offset in
         * the native layout, so nothing is overwritten before it has
         * been read.
         */
        ucpu->nat.error_code = ucpu->cmp.error_code;
        ucpu->nat.pending_event = ucpu->cmp.pending_event;
        ucpu->nat.tsc = ucpu->cmp.tsc;
        ucpu->nat.msr_tsc_aux = 0;
    }
    /* Mimic the old behaviour by unconditionally setting fpu_initialised. */
    ucpu->nat.flags = XEN_X86_FPU_INITIALISED;

    return 0;
}

DECLARE_HVM_SAVE_TYPE_COMPAT(CPU, 2, struct hvm_hw_cpu, \
                             struct hvm_hw_cpu_compat, _hvm_hw_fix_cpu);
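
/*
 * Illustrative sketch (not part of the saved image format): decoding the
 * pending-event bitfields of a restored CPU record.  The helper name and
 * output parameters are invented for this example; the bit layout is the
 * anonymous union/struct defined in struct hvm_hw_cpu above.  Whether the
 * FPU image in fpu_regs[] is meaningful can likewise be tested with
 * (c->flags & XEN_X86_FPU_INITIALISED).
 */
static inline int _hvm_hw_cpu_pending_event(const struct hvm_hw_cpu *c,
                                            uint8_t *vector, uint8_t *type,
                                            int *has_error_code)
{
    if ( !c->pending_valid )
        return 0;                              /* no event pending */

    *vector = c->pending_vector;               /* vector number, 0-255 */
    *type = c->pending_type;                   /* event type, 3-bit encoding */
    *has_error_code = c->pending_error_valid;  /* error_code field is valid */

    return 1;
}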

/*
 * PIC
 */

struct hvm_hw_vpic {
    /* IR line bitmasks. */
    uint8_t irr;
    uint8_t imr;
    uint8_t isr;

    /* Line IRx maps to IRQ irq_base+x */
    uint8_t irq_base;

    /*
     * Where are we in ICW2-4 initialisation (0 means no init in progress)?
     * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
     * Bit 2: ICW1.IC4  (1 == ICW4 included in init sequence)
     * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
     */
    uint8_t init_state:4;

    /* IR line with highest priority. */
    uint8_t priority_add:4;

    /* Reads from A=0 obtain ISR or IRR? */
    uint8_t readsel_isr:1;

    /* Reads perform a polling read? */
    uint8_t poll:1;

    /* Automatically clear IRQs from the ISR during INTA? */
    uint8_t auto_eoi:1;

    /* Automatically rotate IRQ priorities during AEOI? */
    uint8_t rotate_on_auto_eoi:1;

    /* Exclude slave inputs when considering in-service IRQs? */
    uint8_t special_fully_nested_mode:1;

    /* Special mask mode excludes masked IRs from AEOI and priority checks. */
    uint8_t special_mask_mode:1;

    /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
    uint8_t is_master:1;

    /* Edge/trigger selection. */
    uint8_t elcr;

    /* Virtual INT output. */
    uint8_t int_output;
};

DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
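
/*
 * Illustrative sketch (not part of the saved image format): unpacking the
 * ICW initialisation state described in the comments above.  The helper
 * names are invented for this example.
 */
static inline unsigned int _hvm_hw_vpic_next_icw(const struct hvm_hw_vpic *v)
{
    /* Bits 0-1 (=x): the next write at A=1 sets ICW(x+1); 0 means idle. */
    return v->init_state & 3;
}

static inline int _hvm_hw_vpic_expects_icw4(const struct hvm_hw_vpic *v)
{
    /* Bit 2 mirrors ICW1.IC4: ICW4 is part of the init sequence. */
    return (v->init_state >> 2) & 1;
}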


/*
 * IO-APIC
 */

union vioapic_redir_entry
{
    uint64_t bits;
    struct {
        uint8_t vector;
        uint8_t delivery_mode:3;
        uint8_t dest_mode:1;
        uint8_t delivery_status:1;
        uint8_t polarity:1;
        uint8_t remote_irr:1;
        uint8_t trig_mode:1;
        uint8_t mask:1;
        uint8_t reserve:7;
        uint8_t reserved[4];
        uint8_t dest_id;
    } fields;
};

#define VIOAPIC_NUM_PINS  48 /* 16 ISA IRQs, 32 non-legacy PCI IRQs. */

#define XEN_HVM_VIOAPIC(name, cnt)                      \
    struct name {                                       \
        uint64_t base_address;                          \
        uint32_t ioregsel;                              \
        uint32_t id;                                    \
        union vioapic_redir_entry redirtbl[cnt];        \
    }

XEN_HVM_VIOAPIC(hvm_hw_vioapic, VIOAPIC_NUM_PINS);

#ifndef __XEN__
#undef XEN_HVM_VIOAPIC
#else
#undef VIOAPIC_NUM_PINS
#endif

DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
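
/*
 * Illustrative sketch (not part of the saved image format): the union above
 * lets a consumer view a redirection entry either as a raw 64-bit value or
 * field by field.  The helper name is invented for this example.
 */
static inline int _hvm_hw_vioapic_pin_masked(const struct hvm_hw_vioapic *s,
                                             unsigned int pin)
{
    /* Bounds-check against the declared table, then test the mask bit. */
    return pin < sizeof(s->redirtbl) / sizeof(s->redirtbl[0]) &&
           s->redirtbl[pin].fields.mask;
}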


/*
 * LAPIC
 */

struct hvm_hw_lapic {
    uint64_t apic_base_msr;
    uint32_t disabled; /* VLAPIC_xx_DISABLED */
    uint32_t timer_divisor;
    uint64_t tdt_msr;
};

DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);

struct hvm_hw_lapic_regs {
    uint8_t data[1024];
};

DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);


/*
 * IRQs
 */

struct hvm_hw_pci_irqs {
    /*
     * Virtual interrupt wires for a single PCI bus.
     * Indexed by: device*4 + INTx#.
     */
    union {
        unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */
        uint64_t pad[2];
    };
};

DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
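
/*
 * Illustrative sketch (not part of the saved image format): testing the
 * virtual wire for a given <device, INTx#> pair using the indexing rule
 * quoted above (valid for device 0-31, INTx# 0-3).  The helper name is
 * invented for this example; the ISA bitmap declared below is read the
 * same way, indexed by ISA IRQ number.
 */
static inline int _hvm_hw_pci_intx_asserted(const struct hvm_hw_pci_irqs *irqs,
                                            unsigned int device,
                                            unsigned int intx)
{
    unsigned int wire = device * 4 + intx;          /* device*4 + INTx# */
    unsigned int bits = sizeof(irqs->i[0]) * 8;     /* bits per array word */

    return (irqs->i[wire / bits] >> (wire % bits)) & 1;
}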

struct hvm_hw_isa_irqs {
    /*
     * Virtual interrupt wires for ISA devices.
     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
     */
    union {
        unsigned long i[1];  /* DECLARE_BITMAP(i, 16); */
        uint64_t pad[1];
    };
};

DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);

struct hvm_hw_pci_link {
    /*
     * PCI-ISA interrupt router.
     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
     * The router provides a programmable mapping from each link to a GSI.
     */
    uint8_t route[4];
    uint8_t pad0[4];
};

DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
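
/*
 * Illustrative sketch (not part of the saved image format): resolving a
 * <device, INTx#> pair to a GSI through the link router, using the
 * 'barber's pole' rule quoted above.  The helper name is invented for
 * this example.
 */
static inline uint8_t _hvm_hw_pci_intx_gsi(const struct hvm_hw_pci_link *lnk,
                                           unsigned int device,
                                           unsigned int intx)
{
    unsigned int link = (device + intx) & 3;    /* barber's pole mapping */

    return lnk->route[link];                    /* GSI this link routes to */
}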

/*
 * PIT
 */

struct hvm_hw_pit {
    struct hvm_hw_pit_channel {
        uint32_t count; /* can be 65536 */
        uint16_t latched_count;
        uint8_t count_latched;
        uint8_t status_latched;
        uint8_t status;
        uint8_t read_state;
        uint8_t write_state;
        uint8_t write_latch;
        uint8_t rw_mode;
        uint8_t mode;
        uint8_t bcd; /* not supported */
        uint8_t gate; /* timer start */
    } channels[3];  /* 3 x 16 bytes */
    uint32_t speaker_data_on;
    uint32_t pad0;
};

DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);


/*
 * RTC
 */

#define RTC_CMOS_SIZE 14
struct hvm_hw_rtc {
    /* CMOS bytes */
    uint8_t cmos_data[RTC_CMOS_SIZE];
    /* Index register for 2-part operations */
    uint8_t cmos_index;
    uint8_t pad0;
};

DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
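
/*
 * Illustrative sketch (not part of the saved image format): the saved CMOS
 * contents are read straight out of cmos_data[], while cmos_index records
 * the index most recently selected for the two-part index/data access.
 * The helper name is invented for this example.
 */
static inline uint8_t _hvm_hw_rtc_cmos_read(const struct hvm_hw_rtc *rtc,
                                            unsigned int index)
{
    /* Out-of-range indexes read as zero in this sketch. */
    return index < RTC_CMOS_SIZE ? rtc->cmos_data[index] : 0;
}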


/*
 * HPET
 */

#define HPET_TIMER_NUM     3    /* 3 timers supported now */
struct hvm_hw_hpet {
    /* Memory-mapped, software visible registers */
    uint64_t capability;        /* capabilities */
    uint64_t res0;              /* reserved */
    uint64_t config;            /* configuration */
    uint64_t res1;              /* reserved */
    uint64_t isr;               /* interrupt status reg */
    uint64_t res2[25];          /* reserved */
    uint64_t mc64;              /* main counter */
    uint64_t res3;              /* reserved */
    struct {                    /* timers */
        uint64_t config;        /* configuration/cap */
        uint64_t cmp;           /* comparator */
        uint64_t fsb;           /* FSB route, not supported now */
        uint64_t res4;          /* reserved */
    } timers[HPET_TIMER_NUM];
    uint64_t res5[4*(24-HPET_TIMER_NUM)];  /* reserved, up to 0x3ff */

    /* Hidden register state */
    uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
};

DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
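
/*
 * Illustrative sketch (not part of the saved image format): the register
 * block above mirrors the architectural HPET MMIO window, with the main
 * counter at offset 0xF0 and timer N's registers at 0x100 + 0x20*N.  The
 * helper name is invented for this example.
 */
static inline unsigned int _hvm_hw_hpet_timer_offset(unsigned int timer_no)
{
    /* Timers start at 0x100; each one spans a 0x20-byte register block. */
    return 0x100 +
           (unsigned int)sizeof(((struct hvm_hw_hpet *)0)->timers[0]) *
           timer_no;
}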


/*
 * PM timer
 */

struct hvm_hw_pmtimer {
    uint32_t tmr_val;   /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
    uint16_t pm1a_sts;  /* PM1a_EVT_BLK.PM1a_STS: status register */
    uint16_t pm1a_en;   /* PM1a_EVT_BLK.PM1a_EN: enable register */
};

DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);

/*
 * MTRR MSRs
 */

struct hvm_hw_mtrr {
#define MTRR_VCNT 8
#define NUM_FIXED_MSR 11
    uint64_t msr_pat_cr;
    /* mtrr physbase & physmask msr pair */
    uint64_t msr_mtrr_var[MTRR_VCNT*2];
    uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
    uint64_t msr_mtrr_cap;
    uint64_t msr_mtrr_def_type;
};

DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
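
/*
 * Illustrative sketch (not part of the saved image format): msr_mtrr_var[]
 * holds the variable-range MTRRs as consecutive physbase/physmask pairs,
 * so range i occupies indexes 2*i and 2*i + 1.  The helper names are
 * invented for this example.
 */
static inline uint64_t _hvm_hw_mtrr_physbase(const struct hvm_hw_mtrr *m,
                                             unsigned int range)
{
    return m->msr_mtrr_var[2 * range];          /* IA32_MTRR_PHYSBASE(i) */
}

static inline uint64_t _hvm_hw_mtrr_physmask(const struct hvm_hw_mtrr *m,
                                             unsigned int range)
{
    return m->msr_mtrr_var[2 * range + 1];      /* IA32_MTRR_PHYSMASK(i) */
}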

/*
 * The save area of XSAVE/XRSTOR.
 */

struct hvm_hw_cpu_xsave {
    uint64_t xfeature_mask;        /* Ignored */
    uint64_t xcr0;                 /* Updated by XSETBV */
    uint64_t xcr0_accum;           /* Updated by XSETBV */
    struct {
        struct { char x[512]; } fpu_sse;

        struct hvm_hw_cpu_xsave_hdr {
            uint64_t xstate_bv;         /* Updated by XRSTOR */
            uint64_t xcomp_bv;          /* Updated by XRSTOR{C,S} */
            uint64_t reserved[6];
        } xsave_hdr;                    /* The 64-byte header */
    } save_area;
};

#define CPU_XSAVE_CODE  16

/*
 * Viridian hypervisor context.
 */

struct hvm_viridian_domain_context {
    uint64_t hypercall_gpa;
    uint64_t guest_os_id;
    uint64_t time_ref_count;
    uint64_t reference_tsc;
};

DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);

struct hvm_viridian_vcpu_context {
    uint64_t vp_assist_msr;
    uint8_t  vp_assist_vector;
    uint8_t  _pad[7];
};

DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);

struct hvm_vmce_vcpu {
    uint64_t caps;
    uint64_t mci_ctl2_bank0;
    uint64_t mci_ctl2_bank1;
    uint64_t mcg_ext_ctl;
};

DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);

struct hvm_tsc_adjust {
    uint64_t tsc_adjust;
};

DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);


struct hvm_msr {
    uint32_t count;
    struct hvm_one_msr {
        uint32_t index;
        uint32_t _rsvd;
        uint64_t val;
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    } msr[];
#elif defined(__GNUC__)
    } msr[0];
#else
    } msr[1 /* variable size */];
#endif
};

#define CPU_MSR_CODE    20
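
/*
 * Illustrative sketch (not part of the saved image format): struct hvm_msr
 * is variable-sized, so a consumer derives a record's length from its
 * count field rather than from sizeof().  The macro name is invented for
 * this example and assumes offsetof() is available (e.g. via <stddef.h>).
 */
#define HVM_MSR_RECORD_SIZE(count) \
    (offsetof(struct hvm_msr, msr) + (count) * sizeof(struct hvm_one_msr))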

/*
 * Largest type-code in use
 */
#define HVM_SAVE_CODE_MAX 20

#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */