#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

#include <asm/page.h>

/*
 * Xen reserves a memory page of GDT entries.
 * No guest GDT entries exist beyond the Xen reserved area.
 */
#define NR_RESERVED_GDT_PAGES 1
#define NR_RESERVED_GDT_BYTES (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8)
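/* With the 4 KiB x86 PAGE_SIZE this amounts to 512 eight-byte entries. */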

#define LAST_RESERVED_GDT_PAGE  \
    (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1)
#define LAST_RESERVED_GDT_BYTE  \
    (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1)
#define LAST_RESERVED_GDT_ENTRY \
    (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)

#define LDT_ENTRY_SIZE 8

#define FLAT_COMPAT_RING1_CS 0xe019 /* GDT index 7171 */
#define FLAT_COMPAT_RING1_DS 0xe021 /* GDT index 7172 */
#define FLAT_COMPAT_RING1_SS 0xe021 /* GDT index 7172 */
#define FLAT_COMPAT_RING3_CS 0xe02b /* GDT index 7173 */
#define FLAT_COMPAT_RING3_DS 0xe033 /* GDT index 7174 */
#define FLAT_COMPAT_RING3_SS 0xe033 /* GDT index 7174 */

#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS
#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS
#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS
#define FLAT_COMPAT_USER_DS FLAT_COMPAT_RING3_DS
#define FLAT_COMPAT_USER_CS FLAT_COMPAT_RING3_CS
#define FLAT_COMPAT_USER_SS FLAT_COMPAT_RING3_SS
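
/*
 * Long-mode TSS and LDT descriptors are 16 bytes wide, so each occupies
 * two consecutive 8-byte GDT entries; hence the +2 spacing below.
 */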
#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY (TSS_ENTRY + 2)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)
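
/*
 * A selector encodes (index << 3) | TI | RPL.  The selectors below use
 * the GDT (TI = 0) with RPL 0, so each is simply its entry index shifted
 * left by three.
 */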
#define TSS_SELECTOR (TSS_ENTRY << 3)
#define LDT_SELECTOR (LDT_ENTRY << 3)
#define PER_CPU_SELECTOR (PER_CPU_GDT_ENTRY << 3)

#ifndef __ASSEMBLY__

#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)

/* Fix up the RPL of a guest segment selector. */
#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})

#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)
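
/*
 * Illustration (hypothetical selector values): for a 64-bit PV domain the
 * required kernel RPL is 3, so a selector with a lower RPL is raised while
 * an already-compliant one is left unchanged:
 *
 *     uint16_t sel = 0xe008;                // RPL 0
 *     fixup_guest_code_selector(d, sel);    // sel is now 0xe00b (RPL 3)
 */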

/*
 * We need this check because enforcing the correct guest kernel RPL is
 * insufficient if the selector is poked into an interrupt, trap or call gate.
 * The selector RPL is ignored when a gate is accessed. We must therefore make
 * sure that the selector does not reference a Xen-private segment.
 *
 * Note that selectors used only by IRET do not need to be checked. If the
 * descriptor DPL differs from CS RPL then we'll #GP.
 *
 * Stack and data selectors do not need to be checked. If DS, ES, FS, GS are
 * DPL < CPL then they'll be cleared automatically. If SS RPL or DPL differs
 * from CS RPL then we'll #GP.
 */
#define guest_gate_selector_okay(d, sel)                                 \
    ((((sel) >> 3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */       \
     ((sel) == (!is_pv_32bit_domain(d) ?                                 \
                FLAT_KERNEL_CS :                  /* Xen default seg? */ \
                FLAT_COMPAT_KERNEL_CS)) ||                               \
     ((sel) & 4))                                 /* LDT seg? */
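
/*
 * In other words: selectors within the guest-controlled part of the GDT
 * and all LDT selectors (bit 2 set) are acceptable; a selector into the
 * Xen-reserved GDT range is rejected unless it is exactly the guest's
 * flat kernel CS.
 */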

#endif /* __ASSEMBLY__ */

/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE (15<< 8)
#define _SEGMENT_WR   ( 1<< 9) /* Writeable (data) or Readable (code)
                                  segment */
#define _SEGMENT_EC   ( 1<<10) /* Expand-down or Conforming segment */
#define _SEGMENT_CODE ( 1<<11) /* Code (vs data) segment for non-system
                                  segments */
#define _SEGMENT_S    ( 1<<12) /* S flag: set for code/data segments,
                                  clear for system descriptors */
#define _SEGMENT_DPL  ( 3<<13) /* Descriptor Privilege Level */
#define _SEGMENT_P    ( 1<<15) /* Segment Present */
#define _SEGMENT_L    ( 1<<21) /* 64-bit segment */
#define _SEGMENT_DB   ( 1<<22) /* 16- or 32-bit segment */
#define _SEGMENT_G    ( 1<<23) /* Granularity */
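
/*
 * Example (illustrative): the attribute bits of a present, DPL-3,
 * readable 64-bit code segment decompose as
 *     _SEGMENT_P | _SEGMENT_DPL | _SEGMENT_S | _SEGMENT_CODE |
 *     _SEGMENT_WR | _SEGMENT_L
 */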

#ifndef __ASSEMBLY__

/* System Descriptor types for GDT and IDT entries. */
#define SYS_DESC_tss16_avail  1
#define SYS_DESC_ldt          2
#define SYS_DESC_tss16_busy   3
#define SYS_DESC_call_gate16  4
#define SYS_DESC_task_gate    5
#define SYS_DESC_irq_gate16   6
#define SYS_DESC_trap_gate16  7
#define SYS_DESC_tss_avail    9
#define SYS_DESC_tss_busy    11
#define SYS_DESC_call_gate   12
#define SYS_DESC_irq_gate    14
#define SYS_DESC_trap_gate   15
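
/*
 * Note: in long mode only SYS_DESC_ldt, the 64-bit TSS types and the
 * 64-bit gate types are architecturally valid; the 16-bit variants and
 * the task gate are listed for completeness.
 */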

typedef union {
    uint64_t raw;
    struct {
        uint32_t a, b;
    };
} seg_desc_t;
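
/*
 * 'a' aliases the low 32 bits and 'b' the high 32 bits of the 8-byte
 * entry; _set_tssldt_desc() below writes each half separately.
 */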

#define _set_tssldt_desc(desc,addr,limit,type)          \
do {                                                    \
    (desc)[0].b = (desc)[1].b = 0;                      \
    smp_wmb(); /* disable entry /then/ rewrite */       \
    (desc)[0].a =                                       \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);  \
    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32); \
    smp_wmb(); /* rewrite /then/ enable entry */        \
    (desc)[0].b =                                       \
        ((u32)(addr) & 0xFF000000U) |                   \
        ((u32)(type) << 8) | 0x8000U |                  \
        (((u32)(addr) & 0x00FF0000U) >> 16);            \
} while (0)
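
/*
 * Usage sketch (illustrative; 'gdt' and 'tss' are hypothetical names for
 * a CPU's GDT virtual base and its TSS): install an available 64-bit TSS
 * descriptor, then activate it with the task register.
 *
 *     _set_tssldt_desc(gdt + TSS_ENTRY, (unsigned long)&tss,
 *                      sizeof(tss) - 1, SYS_DESC_tss_avail);
 *     ltr(TSS_SELECTOR);
 */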

struct __packed desc_ptr {
    unsigned short limit;
    unsigned long base;
};

extern seg_desc_t boot_gdt[];
DECLARE_PER_CPU(seg_desc_t *, gdt);
DECLARE_PER_CPU(l1_pgentry_t, gdt_l1e);
extern seg_desc_t boot_compat_gdt[];
DECLARE_PER_CPU(seg_desc_t *, compat_gdt);
DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_l1e);
DECLARE_PER_CPU(bool, full_gdt_loaded);

static inline void lgdt(const struct desc_ptr *gdtr)
{
    __asm__ __volatile__ ( "lgdt %0" :: "m" (*gdtr) : "memory" );
}

static inline void lidt(const struct desc_ptr *idtr)
{
    __asm__ __volatile__ ( "lidt %0" :: "m" (*idtr) : "memory" );
}

static inline void lldt(unsigned int sel)
{
    __asm__ __volatile__ ( "lldt %w0" :: "rm" (sel) : "memory" );
}

static inline void ltr(unsigned int sel)
{
    __asm__ __volatile__ ( "ltr %w0" :: "rm" (sel) : "memory" );
}
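
/*
 * Usage sketch (illustrative; 'my_gdt' and 'nr_ents' are hypothetical):
 * point the CPU at a new GDT via lgdt().  The limit field is the table
 * size in bytes minus one.
 *
 *     const struct desc_ptr gdtr = {
 *         .limit = nr_ents * sizeof(seg_desc_t) - 1,
 *         .base  = (unsigned long)my_gdt,
 *     };
 *
 *     lgdt(&gdtr);
 */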

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */