#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

/*
 * Xen reserves a memory page of GDT entries.
 * No guest GDT entries exist beyond the Xen reserved area.
 */
#define NR_RESERVED_GDT_PAGES   1
#define NR_RESERVED_GDT_BYTES   (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8)
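
/*
 * Worked example (editor's sketch, not part of the original header): with a
 * 4 KiB PAGE_SIZE, one reserved page holds 4096 / 8 = 512 eight-byte
 * descriptors.  The compile-time check below is illustrative only.
 */
#if 0 /* example only; assumes PAGE_SIZE == 4096 */
BUILD_BUG_ON(NR_RESERVED_GDT_ENTRIES != 512); /* (1 * 4096) / 8 == 512 */
#endif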

#define LAST_RESERVED_GDT_PAGE  \
    (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1)
#define LAST_RESERVED_GDT_BYTE  \
    (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1)
#define LAST_RESERVED_GDT_ENTRY \
    (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)

#define LDT_ENTRY_SIZE 8

#define FLAT_COMPAT_RING1_CS 0xe019  /* GDT index 7171, RPL 1 */
#define FLAT_COMPAT_RING1_DS 0xe021  /* GDT index 7172, RPL 1 */
#define FLAT_COMPAT_RING1_SS 0xe021  /* GDT index 7172, RPL 1 */
#define FLAT_COMPAT_RING3_CS 0xe02b  /* GDT index 7173, RPL 3 */
#define FLAT_COMPAT_RING3_DS 0xe033  /* GDT index 7174, RPL 3 */
#define FLAT_COMPAT_RING3_SS 0xe033  /* GDT index 7174, RPL 3 */

#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS
#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS
#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS
#define FLAT_COMPAT_USER_DS   FLAT_COMPAT_RING3_DS
#define FLAT_COMPAT_USER_CS   FLAT_COMPAT_RING3_CS
#define FLAT_COMPAT_USER_SS   FLAT_COMPAT_RING3_SS
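
/*
 * Selector decode, for reference (editor's sketch, not original code): bits
 * 15:3 hold the descriptor index, bit 2 the table indicator (0 = GDT,
 * 1 = LDT) and bits 1:0 the RPL, so 0xe019 = (7171 << 3) | 1 selects GDT
 * entry 7171 at RPL 1.
 */
#if 0 /* illustrative only */
#define SEL_INDEX(sel) ((sel) >> 3) /* SEL_INDEX(FLAT_COMPAT_RING1_CS) == 7171 */
#define SEL_RPL(sel)   ((sel) & 3)  /* SEL_RPL(FLAT_COMPAT_RING1_CS) == 1 */
#endif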

/*
 * In long mode the TSS and LDT descriptors are 16 bytes, so each occupies
 * two consecutive 8-byte GDT slots; hence the "+ 2" strides below.
 */
#define TSS_ENTRY         (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY         (TSS_ENTRY + 2)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)

#ifndef __ASSEMBLY__

#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)

/* Fix up the RPL of a guest segment selector. */
#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})

#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)
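
/*
 * Usage sketch (editor's illustration; names are hypothetical): clamp a
 * guest-supplied selector before storing it, so a 32-bit PV kernel can
 * never install a selector with RPL below its kernel RPL of 1.
 */
#if 0 /* example only */
uint16_t cs = guest_supplied_cs;  /* hypothetical guest input */
fixup_guest_code_selector(d, cs); /* raises the RPL if it is too low */
regs->cs = cs;
#endif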

/*
 * We need this check because enforcing the correct guest kernel RPL is
 * insufficient if the selector is poked into an interrupt, trap or call gate.
 * The selector RPL is ignored when a gate is accessed. We must therefore make
 * sure that the selector does not reference a Xen-private segment.
 *
 * Note that selectors used only by IRET do not need to be checked. If the
 * descriptor DPL differs from the CS RPL then we'll #GP.
 *
 * Stack and data selectors do not need to be checked. If the DPL of DS, ES,
 * FS or GS is below CPL then they'll be cleared automatically. If SS RPL or
 * DPL differs from CS RPL then we'll #GP.
 */
#define guest_gate_selector_okay(d, sel)                                 \
    ((((sel) >> 3) < FIRST_RESERVED_GDT_ENTRY) ||  /* Guest seg? */      \
     ((sel) == (!is_pv_32bit_domain(d) ?                                 \
                FLAT_KERNEL_CS :                   /* Xen default seg? */\
                FLAT_COMPAT_KERNEL_CS)) ||                               \
     ((sel) & 4))                                  /* LDT seg? */
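
/*
 * Usage sketch (editor's illustration): a hypothetical gate-installation
 * path would reject selectors that reach into Xen's reserved descriptors.
 * "gate_cs" is an illustrative name for a guest-supplied value.
 */
#if 0 /* example only */
if ( !guest_gate_selector_okay(d, gate_cs) )
    return -EPERM; /* selector would name a Xen-private segment */
#endif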

#endif /* __ASSEMBLY__ */

/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE    (15<< 8)
#define _SEGMENT_WR      ( 1<< 9) /* Writeable (data) or Readable (code)
                                     segment */
#define _SEGMENT_EC      ( 1<<10) /* Expand-down or Conforming segment */
#define _SEGMENT_CODE    ( 1<<11) /* Code (vs data) segment for non-system
                                     segments */
#define _SEGMENT_S       ( 1<<12) /* System descriptor (yes iff S==0) */
#define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
#define _SEGMENT_P       ( 1<<15) /* Segment Present */
#define _SEGMENT_L       ( 1<<21) /* 64-bit segment */
#define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
#define _SEGMENT_G       ( 1<<23) /* Granularity */
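
/*
 * Example check (editor's sketch): given the high 32 bits "b" of a
 * descriptor, test for a present, non-system (S == 1) code segment.
 * "b" is a hypothetical local, not part of this header.
 */
#if 0 /* illustrative only */
int present_code = (b & _SEGMENT_P) && (b & _SEGMENT_S) && (b & _SEGMENT_CODE);
#endif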

#ifndef __ASSEMBLY__

/* System Descriptor types for GDT and IDT entries. */
#define SYS_DESC_tss16_avail  1
#define SYS_DESC_ldt          2
#define SYS_DESC_tss16_busy   3
#define SYS_DESC_call_gate16  4
#define SYS_DESC_task_gate    5
#define SYS_DESC_irq_gate16   6
#define SYS_DESC_trap_gate16  7
#define SYS_DESC_tss_avail    9
#define SYS_DESC_tss_busy    11
#define SYS_DESC_call_gate   12
#define SYS_DESC_irq_gate    14
#define SYS_DESC_trap_gate   15

struct desc_struct {
    u32 a, b;
};

typedef struct {
    u64 a, b;
} idt_entry_t;

/* Write the lower 64 bits of an IDT Entry. This relies on the upper 32
 * bits of the address not changing, which is a safe assumption as all
 * functions we are likely to load will live inside the 1GB
 * code/data/bss address range.
 *
 * Ideally, we would use cmpxchg16b, but this is not supported on some
 * old AMD 64bit capable processors, and has no safe equivalent.
 */
static inline void _write_gate_lower(volatile idt_entry_t *gate,
                                     const idt_entry_t *new)
{
    ASSERT(gate->b == new->b);
    gate->a = new->a;
}

#define _set_gate(gate_addr,type,dpl,addr)                   \
do {                                                         \
    (gate_addr)->a = 0;                                      \
    wmb(); /* disable gate /then/ rewrite */                 \
    (gate_addr)->b =                                         \
        ((unsigned long)(addr) >> 32);                       \
    wmb(); /* rewrite /then/ enable gate */                  \
    (gate_addr)->a =                                         \
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |     \
        ((unsigned long)(dpl) << 45) |                       \
        ((unsigned long)(type) << 40) |                      \
        ((unsigned long)(addr) & 0xFFFFUL) |                 \
        ((unsigned long)__HYPERVISOR_CS64 << 16) |           \
        (1UL << 47);                                         \
} while (0)
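
/*
 * Usage sketch (editor's illustration; "idt_table" and "do_example_trap"
 * are hypothetical names): install a DPL-0 interrupt gate for a handler.
 */
#if 0 /* example only */
_set_gate(&idt_table[0x20], SYS_DESC_irq_gate, 0, &do_example_trap);
#endif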

static inline void _set_gate_lower(idt_entry_t *gate, unsigned long type,
                                   unsigned long dpl, void *addr)
{
    idt_entry_t idte;
    idte.b = gate->b;
    idte.a =
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
        ((unsigned long)(dpl) << 45) |
        ((unsigned long)(type) << 40) |
        ((unsigned long)(addr) & 0xFFFFUL) |
        ((unsigned long)__HYPERVISOR_CS64 << 16) |
        (1UL << 47);
    _write_gate_lower(gate, &idte);
}

/* Update the lower half handler of an IDT Entry, without changing any
 * other configuration. */
static inline void _update_gate_addr_lower(idt_entry_t *gate, void *addr)
{
    idt_entry_t idte;
    idte.a = gate->a;

    idte.b = ((unsigned long)(addr) >> 32);
    idte.a &= 0x0000FFFFFFFF0000ULL;
    idte.a |= (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
              ((unsigned long)(addr) & 0xFFFFUL);

    _write_gate_lower(gate, &idte);
}

#define _set_tssldt_desc(desc,addr,limit,type)               \
do {                                                         \
    (desc)[0].b = (desc)[1].b = 0;                           \
    wmb(); /* disable entry /then/ rewrite */                \
    (desc)[0].a =                                            \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);       \
    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);      \
    wmb(); /* rewrite /then/ enable entry */                 \
    (desc)[0].b =                                            \
        ((u32)(addr) & 0xFF000000U) |                        \
        ((u32)(type) << 8) | 0x8000U |                       \
        (((u32)(addr) & 0x00FF0000U) >> 16);                 \
} while (0)
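
/*
 * Usage sketch (editor's illustration; "gdt" and "tss" are hypothetical
 * locals): publish a 64-bit available TSS in the two GDT slots starting
 * at TSS_ENTRY.
 */
#if 0 /* example only */
_set_tssldt_desc(&gdt[TSS_ENTRY], (unsigned long)&tss,
                 sizeof(tss) - 1, SYS_DESC_tss_avail);
#endif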

struct __packed desc_ptr {
    unsigned short limit;
    unsigned long base;
};
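
/*
 * Usage sketch (editor's illustration; "gdt" is a hypothetical table):
 * desc_ptr matches the memory operand of lgdt/lidt, so a GDT reload is
 * just a struct initialisation followed by the instruction itself.
 */
#if 0 /* example only */
const struct desc_ptr gdtr = {
    .limit = LAST_RESERVED_GDT_BYTE,     /* limit is the last valid byte */
    .base  = (unsigned long)gdt,
};
asm volatile ( "lgdt %0" : : "m" (gdtr) );
#endif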

extern struct desc_struct boot_cpu_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, gdt_table);
extern struct desc_struct boot_cpu_compat_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);

extern void load_TR(void);

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */