/*
 * include/asm-i386/xstate.h
 *
 * x86 extended state (xsave/xrstor) related definitions
 *
 */

#ifndef __ASM_XSTATE_H
#define __ASM_XSTATE_H

#include <xen/sched.h>
#include <asm/cpufeature.h>
#include <asm/x86-defns.h>

#define FCW_DEFAULT               0x037f
#define FCW_RESET                 0x0040
#define FXSAVE_FTW_RESET          0xFF /* Abridged Tag Word format */
#define MXCSR_DEFAULT             0x1f80

extern uint32_t mxcsr_mask;

#define XSTATE_CPUID              0x0000000d

#define XCR_XFEATURE_ENABLED_MASK 0x00000000  /* index of XCR0 */

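/*
 * Layout of an XSAVE area in the standard (uncompacted) format: the
 * 512-byte FXSAVE legacy region comes first, immediately followed by the
 * 64-byte XSAVE header, for a minimum of 576 bytes overall.
 */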
#define XSAVE_HDR_SIZE            64
#define XSAVE_SSE_OFFSET          160
#define XSTATE_YMM_SIZE           256
#define FXSAVE_SIZE               512
#define XSAVE_HDR_OFFSET          FXSAVE_SIZE
#define XSTATE_AREA_MIN_SIZE      (FXSAVE_SIZE + XSAVE_HDR_SIZE)

#define XSTATE_FP_SSE  (X86_XCR0_X87 | X86_XCR0_SSE)
#define XCNTXT_MASK    (X86_XCR0_X87 | X86_XCR0_SSE | X86_XCR0_YMM | \
                        X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \
                        XSTATE_NONLAZY)

#define XSTATE_ALL     (~(1ULL << 63))
#define XSTATE_NONLAZY (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | X86_XCR0_PKRU | \
                        X86_XCR0_TILE_CFG | X86_XCR0_TILE_DATA)
#define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
#define XSTATE_XSAVES_ONLY         0
#define XSTATE_COMPACTION_ENABLED  (1ULL << 63)
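/*
 * Illustrative note: in the compacted format used by XSAVES/XSAVEC, bit 63
 * of xcomp_bv is set and its low bits describe the compacted layout, so a
 * valid compacted header might be initialised as (sketch, not Xen's exact
 * code):
 *
 *     xsave_area->xsave_hdr.xcomp_bv = v->arch.xcr0_accum |
 *                                      XSTATE_COMPACTION_ENABLED;
 */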

#define XSTATE_XSS     (1U << 0)
#define XSTATE_ALIGN64 (1U << 1)

extern u64 xfeature_mask;
extern u64 xstate_align;
extern unsigned int *xstate_offsets;
extern unsigned int *xstate_sizes;

/* extended state save area */
struct __attribute__((aligned (64))) xsave_struct
{
    union __attribute__((aligned(16))) {     /* FPU/MMX, SSE */
        char x[512];
        struct {
            uint16_t fcw;
            uint16_t fsw;
            uint8_t ftw;
            uint8_t rsvd1;
            uint16_t fop;
            union {
                uint64_t addr;
                struct {
                    uint32_t offs;
                    uint16_t sel;
                    uint16_t rsvd;
                };
            } fip, fdp;
            uint32_t mxcsr;
            uint32_t mxcsr_mask;
            /* data registers follow here */
        };
    } fpu_sse;

    struct xsave_hdr {
        u64 xstate_bv;
        u64 xcomp_bv;
        u64 reserved[6];
    } xsave_hdr;                             /* The 64-byte header */

    char data[];                             /* Variable layout states */
};
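/*
 * Illustrative sketch: in the standard (uncompacted) format each enabled
 * component i sits at a fixed offset from the start of the area, recorded
 * in xstate_offsets[]/xstate_sizes[] during xstate_init():
 *
 *     void *comp = (void *)xsave_area + xstate_offsets[i];
 *
 * In the compacted format, offsets instead depend on which lower-numbered
 * components are present (and on XSTATE_ALIGN64 alignment), so they must
 * be computed per area.
 */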

typedef typeof(((struct xsave_struct){}).fpu_sse) fpusse_t;

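/* MPX BNDCSR state component: user-mode bounds configuration and status. */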
struct xstate_bndcsr {
    uint64_t bndcfgu;
    uint64_t bndstatus;
};

/* extended state operations */
bool __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
void set_msr_xss(u64 xss);
uint64_t get_msr_xss(void);
uint64_t read_bndcfgu(void);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
void xstate_set_init(uint64_t mask);
bool xsave_enabled(const struct vcpu *v);
int __must_check validate_xstate(const struct domain *d,
                                 uint64_t xcr0, uint64_t xcr0_accum,
                                 const struct xsave_hdr *hdr);
int __must_check handle_xsetbv(u32 index, u64 new_bv);
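/*
 * Convert a vCPU's XSAVE area between its (possibly compacted) in-memory
 * layout and the standard, uncompacted format, e.g. for communicating
 * guest state to and from the toolstack.
 */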
void expand_xsave_states(const struct vcpu *v, void *dest, unsigned int size);
void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size);

/* extended state init and cleanup functions */
void xstate_free_save_area(struct vcpu *v);
int xstate_alloc_save_area(struct vcpu *v);
void xstate_init(struct cpuinfo_x86 *c);
unsigned int xstate_uncompressed_size(uint64_t xcr0);
unsigned int xstate_compressed_size(uint64_t xstates);

static inline uint64_t xgetbv(unsigned int index)
{
    uint32_t lo, hi;

    ASSERT(index); /* get_xcr0() should be used instead. */
    asm volatile ( ".byte 0x0f,0x01,0xd0" /* xgetbv */
                   : "=a" (lo), "=d" (hi) : "c" (index) );

    return lo | ((uint64_t)hi << 32);
}
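/*
 * Illustrative use (assumes the CPU advertises XGETBV1, i.e.
 * CPUID.(EAX=0xD,ECX=1):EAX bit 2): index 1 reads the XINUSE bitmap, the
 * subset of XCR0-enabled components not in their init state:
 *
 *     uint64_t xinuse = xgetbv(1);
 *
 * Index 0 is deliberately asserted against above; use get_xcr0() instead,
 * which returns the cached XCR0 value without touching hardware.
 */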

static inline bool __nonnull(1)
xsave_area_compressed(const struct xsave_struct *xsave_area)
{
    return xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED;
}

static inline bool xstate_all(const struct vcpu *v)
{
    /*
     * XSTATE_FP_SSE may be excluded: the offsets of XSTATE_FP_SSE (in the
     * legacy region of the xsave area) are fixed, so saving XSTATE_FP_SSE
     * cannot cause an overwriting problem under XSAVES/XSAVEC.
     */
    return xsave_area_compressed(v->arch.xsave_area) &&
           (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE);
}

/*
 * Fetch a pointer to a vCPU's XSAVE area
 *
 * TL;DR: If v == current, the mapping is guaranteed to already exist.
 *
 * Despite the name, this macro might not actually map anything. The only case
 * in which a mutation of the page tables is strictly required is when
 * ASI==on && v!=current. In every other case the mapping already exists and
 * need not be created or destroyed.
 *
 *                         +-----------------+--------------+
 *                         |   v == current  | v != current |
 *          +--------------+-----------------+--------------+
 *          | ASI  enabled | per-vCPU fixmap |  actual map  |
 *          +--------------+-----------------+--------------+
 *          | ASI disabled |             directmap          |
 *          +--------------+--------------------------------+
 *
 * There MUST NOT be outstanding maps of XSAVE areas of the non-current vCPU
 * at the point of context switch. Otherwise, the unmap operation will
 * misbehave.
 *
 * TODO: Expand the macro to cover the ASI cases once the infrastructure to
 * do so is in place.
 *
 * @param v Owner of the XSAVE area
 */
#define VCPU_MAP_XSAVE_AREA(v) ((v)->arch.xsave_area)

/*
 * Drops the mapping of a vCPU's XSAVE area and nullifies its pointer on exit
 *
 * See VCPU_MAP_XSAVE_AREA() for additional information on the persistence of
 * these mappings. This macro only tears down the mapping in the ASI==on &&
 * v!=current case.
 *
 * TODO: Expand the macro to cover the ASI cases once the infrastructure to
 * do so is in place.
 *
 * @param v Owner of the XSAVE area
 * @param x XSAVE blob of v
 */
#define VCPU_UNMAP_XSAVE_AREA(v, x) do { (void)(v); (x) = NULL; } while (0)
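/*
 * Illustrative usage pattern: map and unmap are used in strict pairs so
 * that no mapping outlives a context switch:
 *
 *     struct xsave_struct *xsave_area = VCPU_MAP_XSAVE_AREA(v);
 *
 *     ... read or modify *xsave_area ...
 *
 *     VCPU_UNMAP_XSAVE_AREA(v, xsave_area);
 *
 * after which xsave_area is NULL.
 */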

#endif /* __ASM_XSTATE_H */