/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2015 Intel Corporation
 * Copyright (c) 2016 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#pragma once

#include <lk/compiler.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdbool.h>

__BEGIN_CDECLS

/* page fault error code bits, as delivered by the CPU */
#define PFEX_P 0x01   /* fault was a protection violation (page present) */
#define PFEX_W 0x02   /* faulting access was a write */
#define PFEX_U 0x04   /* fault occurred in user mode */
#define PFEX_RSV 0x08 /* a reserved bit was set in a paging structure */
#define PFEX_I 0x10   /* fault was an instruction fetch */
#define X86_8BYTE_MASK 0xFFFFFFFF
#define X86_CPUID_ADDR_WIDTH 0x80000008 /* CPUID leaf reporting address widths */
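
/*
 * Illustrative sketch (not part of the original header): a page fault
 * handler might classify a fault from the error code like this. The helper
 * name is an assumption for the example only.
 */
static inline bool x86_pf_is_user_write(ulong err_code) {
    /* true when a user-mode write access faulted */
    return (err_code & (PFEX_U | PFEX_W)) == (PFEX_U | PFEX_W);
}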

struct x86_32_iframe {
    uint32_t di, si, bp, sp, bx, dx, cx, ax;        // pushed by common handler using pusha
    uint32_t ds, es, fs, gs;                        // pushed by common handler
    uint32_t vector;                                // pushed by stub
    uint32_t err_code;                              // pushed by interrupt or stub
    uint32_t ip, cs, flags;                         // pushed by interrupt
    uint32_t user_sp, user_ss;                      // pushed by interrupt if priv change occurs
};

struct x86_64_iframe {
    uint64_t di, si, bp, bx, dx, cx, ax;            // pushed by common handler
    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;  // pushed by common handler
    uint64_t vector;                                // pushed by stub
    uint64_t err_code;                              // pushed by interrupt or stub
    uint64_t ip, cs, flags;                         // pushed by interrupt
    uint64_t user_sp, user_ss;                      // pushed by interrupt if priv change occurs
};

#if ARCH_X86_32
typedef struct x86_32_iframe x86_iframe_t;
#elif ARCH_X86_64
typedef struct x86_64_iframe x86_iframe_t;
#endif
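
/*
 * Illustrative sketch (an assumption, not the kernel's actual entry path):
 * the assembly stubs build one of the iframes above and pass it to a C
 * handler, which can key off the vector number. Vector 14 is #PF, whose
 * err_code carries the PFEX_* bits defined above.
 */
static inline bool x86_iframe_is_page_fault(const x86_iframe_t *frame) {
    return frame->vector == 14;
}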

struct x86_32_context_switch_frame {
    uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;
    uint32_t eflags;
    uint32_t eip;
};

struct x86_64_context_switch_frame {
    uint64_t r15, r14, r13, r12;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rflags;
    uint64_t rip;
};

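/* Implemented in assembly. The context switch frame layouts above must match
 * exactly what the assembly implementation pushes and pops when it saves the
 * old stack pointer through 'oldsp' and resumes from 'newsp'. */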
void x86_64_context_switch(vaddr_t *oldsp, vaddr_t newsp);
66
67 /*
68 * x86-32 TSS structure
69 */
70 typedef struct {
71 uint16_t backlink, __blh;
72 uint32_t esp0;
73 uint16_t ss0, __ss0h;
74 uint32_t esp1;
75 uint16_t ss1, __ss1h;
76 uint32_t esp2;
77 uint16_t ss2, __ss2h;
78 uint32_t cr3;
79 uint32_t eip;
80 uint32_t eflags;
81 uint32_t eax, ecx, edx, ebx;
82 uint32_t esp, ebp, esi, edi;
83 uint16_t es, __esh;
84 uint16_t cs, __csh;
85 uint16_t ss, __ssh;
86 uint16_t ds, __dsh;
87 uint16_t fs, __fsh;
88 uint16_t gs, __gsh;
89 uint16_t ldt, __ldth;
    uint16_t trace, bitmap;   /* debug trap flag, I/O permission bitmap base offset */

    uint8_t tss_bitmap[8192]; /* I/O permission bitmap */
} __PACKED tss_32_t;

/*
 * x86-64 TSS structure
 */
typedef struct {
    uint32_t rsvd0;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint32_t rsvd1;
    uint32_t rsvd2;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint32_t rsvd3;
    uint32_t rsvd4;
    uint16_t rsvd5;
    uint16_t iomap_base;
} __PACKED tss_64_t;

#if ARCH_X86_32
typedef tss_32_t tss_t;
#elif ARCH_X86_64
typedef tss_64_t tss_t;
#endif
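
/*
 * Illustrative sketch (an assumption, not an API this header provides): on a
 * thread switch the kernel points the TSS at the new thread's kernel stack,
 * which the CPU loads on a ring 3 -> ring 0 transition.
 */
static inline void example_tss_set_kernel_stack(tss_t *tss, uintptr_t stack_top) {
#if ARCH_X86_32
    tss->esp0 = stack_top;
#elif ARCH_X86_64
    tss->rsp0 = stack_top;
#endif
}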

#define X86_CR0_PE 0x00000001 /* protected mode enable */
#define X86_CR0_MP 0x00000002 /* monitor coprocessor */
#define X86_CR0_EM 0x00000004 /* emulation */
#define X86_CR0_TS 0x00000008 /* task switched */
#define X86_CR0_NE 0x00000020 /* native x87 FP error reporting */
#define X86_CR0_WP 0x00010000 /* supervisor write protect */
#define X86_CR0_NW 0x20000000 /* not write-through */
#define X86_CR0_CD 0x40000000 /* cache disable */
#define X86_CR0_PG 0x80000000 /* enable paging */
#define X86_CR4_PAE 0x00000020 /* PAE paging */
#define X86_CR4_OSFXSR 0x00000200 /* os supports fxsave */
#define X86_CR4_OSXMMEXPT 0x00000400 /* os supports xmm exception */
#define X86_CR4_OSXSAVE 0x00040000 /* os supports xsave */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP protection */
#define X86_CR4_SMAP 0x00200000 /* enable SMAP protection */
#define x86_EFER_NXE 0x00000800 /* execute disable (NX) enable bit in EFER */
#define x86_MSR_EFER 0xc0000080 /* EFER model specific register id */
#define X86_CR4_PSE 0xffffffef /* mask to clear the PSE bit in CR4 */

static inline void x86_clts(void) { __asm__ __volatile__("clts"); }
static inline void x86_hlt(void) { __asm__ __volatile__("hlt"); }
static inline void x86_sti(void) { __asm__ __volatile__("sti"); }
static inline void x86_cli(void) { __asm__ __volatile__("cli"); }
static inline void x86_ltr(uint16_t sel) { __asm__ __volatile__("ltr %%ax" ::"a"(sel)); }
static inline void x86_lidt(uintptr_t base) { __asm volatile("lidt (%0)" ::"r"(base) : "memory"); }
static inline void x86_lgdt(uintptr_t base) { __asm volatile("lgdt (%0)" ::"r"(base) : "memory"); }

static inline ulong x86_get_cr0(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr0, %0 \n\t" : "=r"(rv));
    return rv;
}

static inline void x86_set_cr0(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr0 \n\t" : : "r"(in_val));
}

static inline void set_in_cr0(ulong mask) {
    x86_set_cr0(x86_get_cr0() | mask);
}

static inline void clear_in_cr0(ulong mask) {
    x86_set_cr0(x86_get_cr0() & ~mask);
}
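
/* Illustrative usage (sketch): the read-modify-write helpers above make
 * single-bit CR0 changes one-liners, e.g. turning on supervisor write
 * protection. The helper name is an assumption for the example. */
static inline void example_enable_write_protect(void) {
    set_in_cr0(X86_CR0_WP);
}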

static inline ulong x86_get_cr2(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr2, %0" : "=r"(rv));

    return rv;
}

static inline ulong x86_get_cr3(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr3, %0" : "=r"(rv));
    return rv;
}

static inline void x86_set_cr3(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr3 \n\t" : : "r"(in_val));
}

static inline ulong x86_get_cr4(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr4, %0 \n\t" : "=r"(rv));
    return rv;
}

static inline void x86_set_cr4(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr4 \n\t" : : "r"(in_val));
}

static inline uint8_t inp(uint16_t _port) {
    uint8_t rv;
    __asm__ __volatile__("inb %1, %0" : "=a"(rv) : "dN"(_port));
    return (rv);
}

static inline uint16_t inpw(uint16_t _port) {
    uint16_t rv;
    __asm__ __volatile__("inw %1, %0" : "=a"(rv) : "dN"(_port));
    return (rv);
}

static inline uint32_t inpd(uint16_t _port) {
    uint32_t rv;
    __asm__ __volatile__("inl %1, %0" : "=a"(rv) : "dN"(_port));
    return (rv);
}

static inline void outp(uint16_t _port, uint8_t _data) {
    __asm__ __volatile__("outb %1, %0" : : "dN"(_port), "a"(_data));
}

static inline void outpw(uint16_t _port, uint16_t _data) {
    __asm__ __volatile__("outw %1, %0" : : "dN"(_port), "a"(_data));
}

static inline void outpd(uint16_t _port, uint32_t _data) {
    __asm__ __volatile__("outl %1, %0" : : "dN"(_port), "a"(_data));
}
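
/*
 * Illustrative sketch: the byte/word/dword port accessors above compose into
 * higher-level probes. Example: reading a PCI configuration dword through the
 * legacy 0xCF8/0xCFC mechanism. This is an assumed usage pattern, not an API
 * this header provides.
 */
static inline uint32_t example_pci_read32(uint8_t bus, uint8_t dev, uint8_t fn, uint8_t reg) {
    uint32_t addr = (1u << 31) |              /* enable bit */
                    ((uint32_t)bus << 16) |
                    ((uint32_t)dev << 11) |
                    ((uint32_t)fn << 8) |
                    (reg & 0xfc);             /* dword-aligned register */
    outpd(0xcf8, addr);
    return inpd(0xcfc);
}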

/* The rep string instructions below modify the pointer and count registers
 * and access memory, so those operands are declared read-write ("+") and a
 * memory clobber is listed; the originals passed them as inputs only. */
static inline void inprep(uint16_t _port, uint8_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep insb \n\t"
        "popf \n\t"
        : "+D"(_buffer), "+c"(_reads)
        : "d"(_port)
        : "memory");
}

static inline void outprep(uint16_t _port, uint8_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep outsb \n\t"
        "popf \n\t"
        : "+S"(_buffer), "+c"(_writes)
        : "d"(_port)
        : "memory");
}

static inline void inpwrep(uint16_t _port, uint16_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep insw \n\t"
        "popf \n\t"
        : "+D"(_buffer), "+c"(_reads)
        : "d"(_port)
        : "memory");
}

static inline void outpwrep(uint16_t _port, uint16_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep outsw \n\t"
        "popf \n\t"
        : "+S"(_buffer), "+c"(_writes)
        : "d"(_port)
        : "memory");
}

static inline void inpdrep(uint16_t _port, uint32_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep insl \n\t"
        "popf \n\t"
        : "+D"(_buffer), "+c"(_reads)
        : "d"(_port)
        : "memory");
}

static inline void outpdrep(uint16_t _port, uint32_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep outsl \n\t"
        "popf \n\t"
        : "+S"(_buffer), "+c"(_writes)
        : "d"(_port)
        : "memory");
}
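
/*
 * Illustrative usage (sketch): inpwrep can pull one 512-byte sector from the
 * legacy ATA primary-channel data port. This shows only the data transfer
 * step; issuing the read command and polling for DRQ must happen first, and
 * the port number is an assumption for the example.
 */
static inline void example_ata_read_sector_data(uint16_t *buf) {
    inpwrep(0x1f0, buf, 256); /* 256 words == 512 bytes */
}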

static inline bool x86_is_paging_enabled(void) {
    return (x86_get_cr0() & X86_CR0_PG) != 0;
}

static inline bool x86_is_PAE_enabled(void) {
    if (!x86_is_paging_enabled())
        return false;

    if (!(x86_get_cr4() & X86_CR4_PAE))
        return false;

    return true;
}

static inline void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
    __asm__ __volatile__ (
        "cpuid"
        : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
        : "a"(leaf), "c"(0));
}

static inline void cpuid_c(uint32_t leaf, uint32_t csel, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
    __asm__ __volatile__ (
        "cpuid"
        : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
        : "a"(leaf), "c"(csel));
}
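
/*
 * Illustrative usage (sketch): CPUID leaf 0 returns the CPU vendor string in
 * ebx, edx, ecx, in that order. 'out' must have room for 13 bytes; the helper
 * is an assumption for the example, not part of the original header.
 */
static inline void example_cpu_vendor_string(char out[13]) {
    uint32_t a;
    uint32_t regs[3];

    cpuid(0, &a, &regs[0], &regs[2], &regs[1]); /* regs[] ends up ebx, edx, ecx */
    for (int i = 0; i < 12; i++)
        out[i] = (char)(regs[i / 4] >> (8 * (i % 4)));
    out[12] = '\0';
}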

static inline uint32_t x86_get_address_width(void) {
    uint32_t a, b, c, d;

    cpuid(X86_CPUID_ADDR_WIDTH, &a, &b, &c, &d);

    /* bits 15:8 of eax hold the linear address width */
    return ((a >> 8) & 0x0ff);
}

static inline bool check_smep_avail(void) {
    uint32_t a, b, c, d;

    cpuid_c(0x7, 0, &a, &b, &c, &d);

    /* CPUID.(EAX=07H,ECX=0):EBX bit 7 reports SMEP */
    return ((b >> 7) & 0x1);
}

static inline bool check_smap_avail(void) {
    uint32_t a, b, c, d;

    cpuid_c(0x7, 0, &a, &b, &c, &d);

    /* CPUID.(EAX=07H,ECX=0):EBX bit 20 reports SMAP */
    return ((b >> 20) & 0x1);
}
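
/*
 * Illustrative sketch: the CPUID checks and CR4 accessors above combine to
 * enable SMEP/SMAP only where the CPU reports support. This is an assumed
 * usage pattern, not the kernel's actual init path.
 */
static inline void example_enable_smep_smap_if_avail(void) {
    ulong cr4 = x86_get_cr4();

    if (check_smep_avail())
        cr4 |= X86_CR4_SMEP;
    if (check_smap_avail())
        cr4 |= X86_CR4_SMAP;
    x86_set_cr4(cr4);
}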

static inline uint64_t read_msr(uint32_t msr_id) {
    uint64_t msr_read_val;
    uint32_t low_val;
    uint32_t high_val;

    __asm__ __volatile__ (
        "rdmsr \n\t"
        : "=a" (low_val), "=d"(high_val)
        : "c" (msr_id));

    /* rdmsr returns the low half in eax and the high half in edx */
    msr_read_val = high_val;
    msr_read_val = (msr_read_val << 32) | low_val;

    return msr_read_val;
}

static inline void write_msr(uint32_t msr_id, uint64_t msr_write_val) {
    uint32_t low_val = (uint32_t)msr_write_val;
    uint32_t high_val = (uint32_t)(msr_write_val >> 32);

    __asm__ __volatile__ (
        "wrmsr \n\t"
        : : "c" (msr_id), "a" (low_val), "d"(high_val));
}
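
/*
 * Illustrative usage (sketch): turn on execute disable by setting EFER.NXE
 * through the MSR accessors above, using the x86_MSR_EFER/x86_EFER_NXE
 * definitions from this header. The helper name is an assumption.
 */
static inline void example_enable_nx(void) {
    write_msr(x86_MSR_EFER, read_msr(x86_MSR_EFER) | x86_EFER_NXE);
}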

typedef ulong x86_flags_t;

static inline x86_flags_t x86_save_flags(void) {
    x86_flags_t state;

    __asm__ volatile(
        "pushf;"
        "pop %0"
        : "=rm" (state)
        :: "memory");

    return state;
}

static inline void x86_restore_flags(x86_flags_t flags) {
    __asm__ volatile(
        "push %0;"
        "popf"
        :: "g" (flags)
        : "memory", "cc");
}
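
/*
 * Illustrative usage (sketch): the save/restore pair brackets a short
 * critical section and preserves the caller's interrupt state rather than
 * unconditionally re-enabling interrupts on exit.
 */
static inline void example_critical_section(void) {
    x86_flags_t state = x86_save_flags();
    x86_cli();                /* disable interrupts */
    /* ... touch state shared with interrupt handlers ... */
    x86_restore_flags(state); /* restores the previous IF value */
}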

#define rdtsc(low,high) \
    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
    __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
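
/* Illustrative helper (an assumption, not part of the original header):
 * combine the two rdtsc halves into a single 64-bit timestamp. */
static inline uint64_t example_rdtsc64(void) {
    uint32_t lo, hi;
    rdtsc(lo, hi);
    return ((uint64_t)hi << 32) | lo;
}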

__END_CDECLS