/*
 * include/asm-i386/xstate.h
 *
 * x86 extended state (xsave/xrstor) related definitions
 *
 */

#ifndef __ASM_XSTATE_H
#define __ASM_XSTATE_H

#include <xen/sched.h>
#include <asm/cpufeature.h>

#define FCW_DEFAULT               0x037f
#define FCW_RESET                 0x0040
#define MXCSR_DEFAULT             0x1f80

extern uint32_t mxcsr_mask;

#define XSTATE_CPUID              0x0000000d

#define XCR_XFEATURE_ENABLED_MASK 0x00000000  /* index of XCR0 */

#define XSAVE_HDR_SIZE            64
#define XSAVE_SSE_OFFSET          160
#define XSTATE_YMM_SIZE           256
#define FXSAVE_SIZE               512
#define XSAVE_HDR_OFFSET          FXSAVE_SIZE
#define XSTATE_AREA_MIN_SIZE      (FXSAVE_SIZE + XSAVE_HDR_SIZE)

#define _XSTATE_FP                0
#define XSTATE_FP                 (1ULL << _XSTATE_FP)
#define _XSTATE_SSE               1
#define XSTATE_SSE                (1ULL << _XSTATE_SSE)
#define _XSTATE_YMM               2
#define XSTATE_YMM                (1ULL << _XSTATE_YMM)
#define _XSTATE_BNDREGS           3
#define XSTATE_BNDREGS            (1ULL << _XSTATE_BNDREGS)
#define _XSTATE_BNDCSR            4
#define XSTATE_BNDCSR             (1ULL << _XSTATE_BNDCSR)
#define _XSTATE_OPMASK            5
#define XSTATE_OPMASK             (1ULL << _XSTATE_OPMASK)
#define _XSTATE_ZMM               6
#define XSTATE_ZMM                (1ULL << _XSTATE_ZMM)
#define _XSTATE_HI_ZMM            7
#define XSTATE_HI_ZMM             (1ULL << _XSTATE_HI_ZMM)
#define _XSTATE_PKRU              9
#define XSTATE_PKRU               (1ULL << _XSTATE_PKRU)
#define _XSTATE_LWP               62
#define XSTATE_LWP                (1ULL << _XSTATE_LWP)

#define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
                        XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)

#define XSTATE_ALL     (~(1ULL << 63))
#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR | \
                        XSTATE_PKRU)
#define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
#define XSTATE_XSAVES_ONLY         0
#define XSTATE_COMPACTION_ENABLED  (1ULL << 63)
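
/*
 * Illustrative sketch, not part of the original header: the XSTATE_* values
 * above are plain 64-bit feature masks, so testing whether a component (or a
 * combination such as XSTATE_FP_SSE) is enabled in an XCR0-style value is a
 * simple mask check.  The helper name below is hypothetical.
 */
static inline bool xstate_mask_has(uint64_t mask, uint64_t feature)
{
    /* Every bit of the queried feature mask must be set. */
    return (mask & feature) == feature;
}
/* e.g. xstate_mask_has(xcr0, XSTATE_FP_SSE | XSTATE_YMM) checks AVX state. */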

#define XSTATE_ALIGN64 (1U << 1)

extern u64 xfeature_mask;
extern u64 xstate_align;
extern unsigned int *xstate_offsets;
extern unsigned int *xstate_sizes;

/* extended state save area */
struct __attribute__((aligned (64))) xsave_struct
{
    union __attribute__((aligned(16))) {     /* FPU/MMX, SSE */
        char x[512];
        struct {
            uint16_t fcw;
            uint16_t fsw;
            uint8_t ftw;
            uint8_t rsvd1;
            uint16_t fop;
            union {
                uint64_t addr;
                struct {
                    uint32_t offs;
                    uint16_t sel;
                    uint16_t rsvd;
                };
            } fip, fdp;
            uint32_t mxcsr;
            uint32_t mxcsr_mask;
            /* data registers follow here */
        };
    } fpu_sse;

    struct xsave_hdr {
        u64 xstate_bv;
        u64 xcomp_bv;
        u64 reserved[6];
    } xsave_hdr;                             /* The 64-byte header */

    char data[];                             /* Variable layout states */
};
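
/*
 * Illustrative sketch, not part of the original header: the union at the
 * start of struct xsave_struct mirrors the 512-byte legacy FXSAVE image, and
 * the 64-byte header that follows records in xstate_bv which components hold
 * non-init data.  The helper below is hypothetical.
 */
static inline bool
xsave_area_has_component(const struct xsave_struct *xsave, uint64_t feature)
{
    /* A clear xstate_bv bit means the component is in its init state. */
    return (xsave->xsave_hdr.xstate_bv & feature) == feature;
}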

struct xstate_bndcsr {
    uint64_t bndcfgu;
    uint64_t bndstatus;
};

/* extended state operations */
bool __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
void set_msr_xss(u64 xss);
uint64_t get_msr_xss(void);
uint64_t read_bndcfgu(void);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
void xstate_set_init(uint64_t mask);
bool xsave_enabled(const struct vcpu *v);
int __must_check validate_xstate(u64 xcr0, u64 xcr0_accum,
                                 const struct xsave_hdr *);
int __must_check handle_xsetbv(u32 index, u64 new_bv);
void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size);
void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size);
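
/*
 * Illustrative sketch, not part of the original header: guest updates of
 * XCR0 are validated and applied via handle_xsetbv(), using the XCR0 index
 * defined above as XCR_XFEATURE_ENABLED_MASK.  The wrapper below is
 * hypothetical and elides caller-specific handling.
 */
static inline int example_set_guest_xcr0(uint64_t new_xcr0)
{
    /* Invalid values (e.g. a cleared x87 bit) are rejected with an error. */
    return handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, new_xcr0);
}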

/* extended state init and cleanup functions */
void xstate_free_save_area(struct vcpu *v);
int xstate_alloc_save_area(struct vcpu *v);
void xstate_init(struct cpuinfo_x86 *c);
unsigned int xstate_ctxt_size(u64 xcr0);
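
/*
 * Illustrative sketch, not part of the original header: the allocation and
 * free routines above are used as a pair over a vCPU's lifetime.  The
 * hypothetical wrapper below only illustrates that pairing.
 */
static inline int example_vcpu_xstate_setup(struct vcpu *v)
{
    /* Allocates v->arch.xsave_area; undone later via xstate_free_save_area(). */
    return xstate_alloc_save_area(v);
}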

static inline bool xstate_all(const struct vcpu *v)
{
    /*
     * XSTATE_FP_SSE may be excluded, because the offsets of XSTATE_FP_SSE
     * (in the legacy region of the xsave area) are fixed, so saving
     * XSTATE_FP_SSE will not cause an overwriting problem with XSAVES/XSAVEC.
     */
    return (v->arch.xsave_area->xsave_hdr.xcomp_bv &
            XSTATE_COMPACTION_ENABLED) &&
           (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE);
}

static inline bool __nonnull(1)
xsave_area_compressed(const struct xsave_struct *xsave_area)
{
    return xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED;
}

#endif /* __ASM_XSTATE_H */