/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2015 Intel Corporation
 * Copyright (c) 2016 Travis Geiselbrecht
 * Copyright 2016 The Fuchsia Authors
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#pragma once

#include <lk/compiler.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdbool.h>
__BEGIN_CDECLS

/* page fault error code bits */
#define PFEX_P 0x01   /* page was present (protection violation) */
#define PFEX_W 0x02   /* fault caused by a write access */
#define PFEX_U 0x04   /* fault taken in user mode */
#define PFEX_RSV 0x08 /* reserved bit set in a paging structure */
#define PFEX_I 0x10   /* fault caused by an instruction fetch */

#define X86_8BYTE_MASK 0xFFFFFFFF /* masks the low 32 bits of a 64-bit value */

struct x86_32_iframe {
    uint32_t di, si, bp, sp, bx, dx, cx, ax;       // pushed by common handler using pusha
    uint32_t ds, es, fs, gs;                       // pushed by common handler
    uint32_t vector;                               // pushed by stub
    uint32_t err_code;                             // pushed by interrupt or stub
    uint32_t ip, cs, flags;                        // pushed by interrupt
    uint32_t user_sp, user_ss;                     // pushed by interrupt if priv change occurs
};

struct x86_64_iframe {
    uint64_t di, si, bp, bx, dx, cx, ax;           // pushed by common handler
    uint64_t r8, r9, r10, r11, r12, r13, r14, r15; // pushed by common handler
    uint64_t vector;                               // pushed by stub
    uint64_t err_code;                             // pushed by interrupt or stub
    uint64_t ip, cs, flags;                        // pushed by interrupt
    uint64_t user_sp, user_ss;                     // pushed by interrupt if priv change occurs
};

#if ARCH_X86_32
typedef struct x86_32_iframe x86_iframe_t;
#elif ARCH_X86_64
typedef struct x86_64_iframe x86_iframe_t;
#endif
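
/*
 * Example (sketch): decoding a page-fault error code delivered in an
 * iframe. The handler name is illustrative, not part of this header;
 * x86_get_cr2() is defined further down.
 *
 *   void page_fault_handler(x86_iframe_t *frame) {
 *       ulong fault_addr = x86_get_cr2();          // faulting linear address
 *       bool present = frame->err_code & PFEX_P;   // protection violation vs. not-present
 *       bool write   = frame->err_code & PFEX_W;   // write vs. read access
 *       bool user    = frame->err_code & PFEX_U;   // fault taken in user mode
 *       ...
 *   }
 */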

struct x86_32_context_switch_frame {
    uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;
    uint32_t eflags;
    uint32_t eip;
};

struct x86_64_context_switch_frame {
    uint64_t r15, r14, r13, r12;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rflags;
    uint64_t rip;
};

void x86_64_context_switch(vaddr_t *oldsp, vaddr_t newsp);
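
/*
 * Example (sketch): a scheduler would switch stacks by saving the outgoing
 * thread's stack pointer and loading the incoming one. The thread type and
 * field names here are hypothetical; the callee-saved registers and return
 * address are spilled onto the old stack in the layout of
 * struct x86_64_context_switch_frame.
 *
 *   x86_64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
 */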

/*
 * x86-32 TSS structure
 */
typedef struct {
    uint16_t backlink, __blh;
    uint32_t esp0;
    uint16_t ss0, __ss0h;
    uint32_t esp1;
    uint16_t ss1, __ss1h;
    uint32_t esp2;
    uint16_t ss2, __ss2h;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax, ecx, edx, ebx;
    uint32_t esp, ebp, esi, edi;
    uint16_t es, __esh;
    uint16_t cs, __csh;
    uint16_t ss, __ssh;
    uint16_t ds, __dsh;
    uint16_t fs, __fsh;
    uint16_t gs, __gsh;
    uint16_t ldt, __ldth;
    uint16_t trace, bitmap;

    uint8_t tss_bitmap[8192];
} __PACKED tss_32_t;

/*
 * x86-64 TSS structure
 */
typedef struct {
    uint32_t rsvd0;
    uint64_t rsp0;
    uint64_t rsp1;
    uint64_t rsp2;
    uint32_t rsvd1;
    uint32_t rsvd2;
    uint64_t ist1;
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint32_t rsvd3;
    uint32_t rsvd4;
    uint16_t rsvd5;
    uint16_t iomap_base;
} __PACKED tss_64_t;

#if ARCH_X86_32
typedef tss_32_t tss_t;
#elif ARCH_X86_64
typedef tss_64_t tss_t;
#endif
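
/*
 * Example (sketch): pointing the TSS at a thread's kernel stack so that
 * interrupts taken from user mode land on it. The per-cpu tss_t
 * (system_tss) and the selector constant are hypothetical names.
 *
 * #if ARCH_X86_64
 *   system_tss.rsp0 = (uint64_t)kernel_stack_top;
 * #else
 *   system_tss.esp0 = (uint32_t)kernel_stack_top;
 *   system_tss.ss0 = KERNEL_DATA_SELECTOR;
 * #endif
 */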

/* x86 register bits */
#define X86_CR0_PE (1U<<0)  /* protected mode enable */
#define X86_CR0_MP (1U<<1)  /* monitor coprocessor */
#define X86_CR0_EM (1U<<2)  /* emulation */
#define X86_CR0_TS (1U<<3)  /* task switched */
#define X86_CR0_ET (1U<<4)  /* extension type */
#define X86_CR0_NE (1U<<5)  /* numeric error: native x87 exception reporting */
#define X86_CR0_WP (1U<<16) /* supervisor write protect */
#define X86_CR0_AM (1U<<18) /* alignment mask */
#define X86_CR0_NW (1U<<29) /* not write-through */
#define X86_CR0_CD (1U<<30) /* cache disable */
#define X86_CR0_PG (1U<<31) /* enable paging */
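
/*
 * Example (sketch): enabling supervisor write protection, using the
 * set_in_cr0() helper defined later in this header.
 *
 *   set_in_cr0(X86_CR0_WP);  // fault on supervisor writes to read-only pages
 */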

#define X86_CR4_VME (1U<<0)        /* Virtual-8086 mode extensions */
#define X86_CR4_PVI (1U<<1)        /* Protected Mode Virtual Interrupts */
#define X86_CR4_TSD (1U<<2)        /* Time stamp disable */
#define X86_CR4_DE (1U<<3)         /* Debugging extensions */
#define X86_CR4_PSE (1U<<4)        /* Page Size Extensions */
#define X86_CR4_PAE (1U<<5)        /* PAE paging */
#define X86_CR4_MCE (1U<<6)        /* Machine Check Enable */
#define X86_CR4_PGE (1U<<7)        /* Page Global Enable */
#define X86_CR4_PCE (1U<<8)        /* Performance Monitoring Counter Enable */
#define X86_CR4_OSFXSR (1U<<9)     /* os supports fxsave */
#define X86_CR4_OSXMMEXPT (1U<<10) /* os supports xmm exception */
#define X86_CR4_UMIP (1U<<11)      /* User-mode instruction prevention */
#define X86_CR4_LA57 (1U<<12)      /* 57-bit Linear Addresses */
#define X86_CR4_VMXE (1U<<13)      /* enable vmx */
#define X86_CR4_SMXE (1U<<14)      /* enable smx */
#define X86_CR4_FSGSBASE (1U<<16)  /* enable {rd,wr}{fs,gs}base */
#define X86_CR4_PCIDE (1U<<17)     /* Process-context ID enable */
#define X86_CR4_OSXSAVE (1U<<18)   /* os supports xsave */
#define X86_CR4_KL (1U<<19)        /* key locker enable */
#define X86_CR4_SMEP (1U<<20)      /* Supervisor Mode Execution Prevention */
#define X86_CR4_SMAP (1U<<21)      /* Supervisor Mode Access Prevention */
#define X86_CR4_PKE (1U<<22)       /* Enable protection keys for user mode pages */
#define X86_CR4_CET (1U<<23)       /* Control flow enforcement */
#define X86_CR4_PKS (1U<<24)       /* Enable protection keys for supervisor mode pages */
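
/*
 * Example (sketch): turning on SMEP only if the CPU reports it. The feature
 * test uses cpuid leaf 7 subleaf 0, EBX bit 7 (SMEP); the cpuid_c() and CR4
 * helpers are defined later in this header.
 *
 *   uint32_t a, b, c, d;
 *   cpuid_c(0x7, 0, &a, &b, &c, &d);
 *   if (b & (1U << 7))
 *       x86_set_cr4(x86_get_cr4() | X86_CR4_SMEP);
 */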

#define X86_EFER_SCE (1U<<0)      /* enable SYSCALL */
#define X86_EFER_LME (1U<<8)      /* long mode enable */
#define X86_EFER_LMA (1U<<10)     /* long mode active */
#define X86_EFER_NXE (1U<<11)     /* no execute enable */
#define X86_EFER_SVME (1U<<12)    /* secure virtual machine enable */
#define X86_EFER_LMSLE (1U<<13)   /* long mode segment limit enable */
#define X86_EFER_FFXSR (1U<<14)   /* fast fxsave/fxrstor */
#define X86_EFER_TCE (1U<<15)     /* translation cache extension */
#define X86_EFER_MCOMMIT (1U<<17) /* enable mcommit instruction */
#define X86_EFER_INTWB (1U<<18)   /* interruptible wbinvd/wbnoinvd enable */
#define X86_EFER_UAIE (1U<<20)    /* upper address ignore enable */
#define X86_EFER_AIBRSE (1U<<21)  /* automatic ibrs enable */
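
/*
 * Example (sketch): enabling no-execute page support via EFER, using the
 * MSR accessors defined later in this header.
 *
 *   write_msr(X86_MSR_IA32_EFER, read_msr(X86_MSR_IA32_EFER) | X86_EFER_NXE);
 */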

#define X86_MSR_IA32_PLATFORM_ID 0x00000017        /* platform id */
#define X86_MSR_IA32_APIC_BASE 0x0000001b          /* APIC base physical address */
#define X86_MSR_IA32_TSC_ADJUST 0x0000003b         /* TSC adjust */
#define X86_MSR_IA32_BIOS_SIGN_ID 0x0000008b       /* BIOS update signature */
#define X86_MSR_IA32_MTRRCAP 0x000000fe            /* MTRR capability */
#define X86_MSR_IA32_SYSENTER_CS 0x00000174        /* SYSENTER CS */
#define X86_MSR_IA32_SYSENTER_ESP 0x00000175       /* SYSENTER ESP */
#define X86_MSR_IA32_SYSENTER_EIP 0x00000176       /* SYSENTER EIP */
#define X86_MSR_IA32_MCG_CAP 0x00000179            /* global machine check capability */
#define X86_MSR_IA32_MCG_STATUS 0x0000017a         /* global machine check status */
#define X86_MSR_IA32_MISC_ENABLE 0x000001a0        /* enable/disable misc processor features */
#define X86_MSR_IA32_TEMPERATURE_TARGET 0x000001a2 /* temperature target */
#define X86_MSR_IA32_MTRR_PHYSBASE0 0x00000200     /* MTRR PhysBase0 */
#define X86_MSR_IA32_MTRR_PHYSMASK0 0x00000201     /* MTRR PhysMask0 */
#define X86_MSR_IA32_MTRR_PHYSMASK9 0x00000213     /* MTRR PhysMask9 */
#define X86_MSR_IA32_MTRR_DEF_TYPE 0x000002ff      /* MTRR default type */
#define X86_MSR_IA32_MTRR_FIX64K_00000 0x00000250  /* MTRR FIX64K_00000 */
#define X86_MSR_IA32_MTRR_FIX16K_80000 0x00000258  /* MTRR FIX16K_80000 */
#define X86_MSR_IA32_MTRR_FIX16K_A0000 0x00000259  /* MTRR FIX16K_A0000 */
#define X86_MSR_IA32_MTRR_FIX4K_C0000 0x00000268   /* MTRR FIX4K_C0000 */
#define X86_MSR_IA32_MTRR_FIX4K_F8000 0x0000026f   /* MTRR FIX4K_F8000 */
#define X86_MSR_IA32_PAT 0x00000277                /* PAT */
#define X86_MSR_IA32_TSC_DEADLINE 0x000006e0       /* TSC deadline */
#define X86_MSR_IA32_PM_ENABLE 0x00000770          /* enable/disable HWP */
#define X86_MSR_IA32_HWP_CAPABILITIES 0x00000771   /* HWP performance range enumeration */
#define X86_MSR_IA32_HWP_REQUEST 0x00000774        /* power manage control hints */
#define X86_MSR_IA32_X2APIC_BASE 0x00000800        /* x2APIC base register */
#define X86_MSR_IA32_EFER 0xc0000080               /* EFER */
#define X86_MSR_IA32_STAR 0xc0000081               /* system call address */
#define X86_MSR_IA32_LSTAR 0xc0000082              /* long mode call address */
#define X86_MSR_IA32_CSTAR 0xc0000083              /* ia32-e compat call address */
#define X86_MSR_IA32_FMASK 0xc0000084              /* system call flag mask */
#define X86_MSR_IA32_FS_BASE 0xc0000100            /* fs base address */
#define X86_MSR_IA32_GS_BASE 0xc0000101            /* gs base address */
#define X86_MSR_IA32_KERNEL_GS_BASE 0xc0000102     /* kernel gs base */
#define X86_MSR_IA32_TSC_AUX 0xc0000103            /* TSC aux */
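
/*
 * Example (sketch): extracting the local APIC base address from
 * IA32_APIC_BASE. Bits 12 and up hold the physical base; bit 11 is the
 * APIC global enable.
 *
 *   uint64_t v = read_msr(X86_MSR_IA32_APIC_BASE);
 *   paddr_t apic_base = v & ~0xfffULL;
 *   bool apic_enabled = v & (1ULL << 11);
 */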

// Non-architectural MSRs
#define X86_MSR_RAPL_POWER_UNIT 0x00000606         /* RAPL unit multipliers */
#define X86_MSR_PKG_POWER_LIMIT 0x00000610         /* Package power limits */
#define X86_MSR_PKG_POWER_LIMIT_PL1_CLAMP (1U << 16)
#define X86_MSR_PKG_POWER_LIMIT_PL1_ENABLE (1U << 15)
#define X86_MSR_PKG_ENERGY_STATUS 0x00000611       /* Package energy status */
#define X86_MSR_PKG_POWER_INFO 0x00000614          /* Package power range info */
#define X86_MSR_DRAM_POWER_LIMIT 0x00000618        /* DRAM RAPL power limit control */
#define X86_MSR_DRAM_ENERGY_STATUS 0x00000619      /* DRAM energy status */
#define X86_MSR_PP0_POWER_LIMIT 0x00000638         /* PP0 RAPL power limit control */
#define X86_MSR_PP0_ENERGY_STATUS 0x00000639       /* PP0 energy status */
#define X86_MSR_PP1_POWER_LIMIT 0x00000640         /* PP1 RAPL power limit control */
#define X86_MSR_PP1_ENERGY_STATUS 0x00000641       /* PP1 energy status */
#define X86_MSR_PLATFORM_ENERGY_COUNTER 0x0000064d /* Platform energy counter */
#define X86_MSR_PLATFORM_POWER_LIMIT 0x0000065c    /* Platform power limit control */
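
/*
 * Example (sketch): converting the raw package energy counter to joules.
 * Bits 12:8 of RAPL_POWER_UNIT give the energy unit as a power-of-two
 * divisor; the counter itself is 32 bits wide.
 *
 *   uint64_t units = read_msr(X86_MSR_RAPL_POWER_UNIT);
 *   uint32_t esu = (units >> 8) & 0x1f;  // energy status units
 *   uint64_t raw = read_msr(X86_MSR_PKG_ENERGY_STATUS) & 0xffffffff;
 *   // energy in joules = raw / 2^esu
 */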

/* EFLAGS/RFLAGS */
#define X86_FLAGS_CF (1U<<0)
#define X86_FLAGS_PF (1U<<2)
#define X86_FLAGS_AF (1U<<4)
#define X86_FLAGS_ZF (1U<<6)
#define X86_FLAGS_SF (1U<<7)
#define X86_FLAGS_TF (1U<<8)
#define X86_FLAGS_IF (1U<<9)
#define X86_FLAGS_DF (1U<<10)
#define X86_FLAGS_OF (1U<<11)
#define X86_FLAGS_STATUS_MASK (0xfff)
#define X86_FLAGS_IOPL_MASK (3U<<12)
#define X86_FLAGS_IOPL_SHIFT (12)
#define X86_FLAGS_NT (1U<<14)
#define X86_FLAGS_RF (1U<<16)
#define X86_FLAGS_VM (1U<<17)
#define X86_FLAGS_AC (1U<<18)
#define X86_FLAGS_VIF (1U<<19)
#define X86_FLAGS_VIP (1U<<20)
#define X86_FLAGS_ID (1U<<21)
#define X86_FLAGS_RESERVED_ONES (0x2)
#define X86_FLAGS_RESERVED (0xffc0802a)
#define X86_FLAGS_USER (X86_FLAGS_CF | \
                        X86_FLAGS_PF | \
                        X86_FLAGS_AF | \
                        X86_FLAGS_ZF | \
                        X86_FLAGS_SF | \
                        X86_FLAGS_TF | \
                        X86_FLAGS_DF | \
                        X86_FLAGS_OF | \
                        X86_FLAGS_NT | \
                        X86_FLAGS_AC | \
                        X86_FLAGS_ID)
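
/*
 * Example (sketch): testing whether interrupts were enabled when an iframe
 * was captured, given a hypothetical x86_iframe_t *frame.
 *
 *   bool ints_were_enabled = frame->flags & X86_FLAGS_IF;
 */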

static inline void x86_clts(void) { __asm__ __volatile__("clts"); }
static inline void x86_hlt(void) { __asm__ __volatile__("hlt"); }
static inline void x86_sti(void) { __asm__ __volatile__("sti"); }
static inline void x86_cli(void) { __asm__ __volatile__("cli"); }
static inline void x86_ltr(uint16_t sel) { __asm__ __volatile__("ltr %%ax" ::"a"(sel)); }
static inline void x86_lidt(uintptr_t base) { __asm__ __volatile__("lidt (%0)" ::"r"(base) : "memory"); }
static inline void x86_lgdt(uintptr_t base) { __asm__ __volatile__("lgdt (%0)" ::"r"(base) : "memory"); }

static inline ulong x86_get_cr0(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr0, %0 \n\t" : "=r"(rv));
    return rv;
}

static inline void x86_set_cr0(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr0 \n\t" : : "r"(in_val));
}

static inline void set_in_cr0(ulong mask) {
    x86_set_cr0(x86_get_cr0() | mask);
}

static inline void clear_in_cr0(ulong mask) {
    x86_set_cr0(x86_get_cr0() & ~mask);
}

static inline ulong x86_get_cr2(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr2, %0" : "=r"(rv));

    return rv;
}

static inline ulong x86_get_cr3(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr3, %0" : "=r"(rv));
    return rv;
}

static inline void x86_set_cr3(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr3 \n\t" : : "r"(in_val));
}

static inline ulong x86_get_cr4(void) {
    ulong rv;

    __asm__ __volatile__("mov %%cr4, %0 \n\t" : "=r"(rv));
    return rv;
}

static inline void x86_set_cr4(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr4 \n\t" : : "r"(in_val));
}

#define DEFINE_REGISTER_ACCESSOR(REG)                             \
    static inline void x86_set_##REG(uint16_t value) {            \
        __asm__ volatile("mov %0, %%" #REG : : "r"(value));       \
    }                                                             \
    static inline uint16_t x86_get_##REG(void) {                  \
        uint16_t value;                                           \
        __asm__ volatile("mov %%" #REG ", %0" : "=r"(value));     \
        return value;                                             \
    }

DEFINE_REGISTER_ACCESSOR(ds)
DEFINE_REGISTER_ACCESSOR(es)
DEFINE_REGISTER_ACCESSOR(fs)
DEFINE_REGISTER_ACCESSOR(gs)

#undef DEFINE_REGISTER_ACCESSOR

static inline uint8_t inp(uint16_t _port) {
    uint8_t rv;
    __asm__ __volatile__("inb %1, %0" : "=a"(rv) : "dN"(_port));
    return (rv);
}

static inline uint16_t inpw(uint16_t _port) {
    uint16_t rv;
    __asm__ __volatile__("inw %1, %0" : "=a"(rv) : "dN"(_port));
    return (rv);
}

static inline uint32_t inpd(uint16_t _port) {
    uint32_t rv;
    __asm__ __volatile__("inl %1, %0" : "=a"(rv) : "dN"(_port));
    return (rv);
}

static inline void outp(uint16_t _port, uint8_t _data) {
    __asm__ __volatile__("outb %1, %0" : : "dN"(_port), "a"(_data));
}

static inline void outpw(uint16_t _port, uint16_t _data) {
    __asm__ __volatile__("outw %1, %0" : : "dN"(_port), "a"(_data));
}

static inline void outpd(uint16_t _port, uint32_t _data) {
    __asm__ __volatile__("outl %1, %0" : : "dN"(_port), "a"(_data));
}
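
/*
 * Example (sketch): polling the legacy 8042 keyboard controller status
 * port (0x64) before reading a byte from its data port (0x60).
 *
 *   while ((inp(0x64) & 0x01) == 0)
 *       ;                        // wait for output buffer full
 *   uint8_t scancode = inp(0x60);
 */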

static inline void inprep(uint16_t _port, uint8_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep insb \n\t"
        "popf \n\t"
        : "+D"(_buffer), "+c"(_reads)
        : "d"(_port)
        : "memory");
}

static inline void outprep(uint16_t _port, uint8_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep outsb \n\t"
        "popf \n\t"
        : "+S"(_buffer), "+c"(_writes)
        : "d"(_port));
}

static inline void inpwrep(uint16_t _port, uint16_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep insw \n\t"
        "popf \n\t"
        : "+D"(_buffer), "+c"(_reads)
        : "d"(_port)
        : "memory");
}

static inline void outpwrep(uint16_t _port, uint16_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep outsw \n\t"
        "popf \n\t"
        : "+S"(_buffer), "+c"(_writes)
        : "d"(_port));
}

static inline void inpdrep(uint16_t _port, uint32_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep insl \n\t"
        "popf \n\t"
        : "+D"(_buffer), "+c"(_reads)
        : "d"(_port)
        : "memory");
}

static inline void outpdrep(uint16_t _port, uint32_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__(
        "pushf \n\t"
        "cli \n\t"
        "cld \n\t"
        "rep outsl \n\t"
        "popf \n\t"
        : "+S"(_buffer), "+c"(_writes)
        : "d"(_port));
}
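
/*
 * Example (sketch): pulling one 512-byte sector from the primary ATA data
 * port with a single rep insw, assuming the drive has already been
 * commanded and reports data ready.
 *
 *   uint16_t buf[256];
 *   inpwrep(0x1f0, buf, 256);   // 256 words = 512 bytes
 */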

static inline bool x86_is_paging_enabled(void) {
    return (x86_get_cr0() & X86_CR0_PG) != 0;
}

static inline bool x86_is_PAE_enabled(void) {
    return x86_is_paging_enabled() && (x86_get_cr4() & X86_CR4_PAE) != 0;
}

/* cpuid with the subleaf selector (ecx) fixed at 0 */
static inline void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
    __asm__ __volatile__(
        "cpuid"
        : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
        : "a"(leaf), "c"(0));
}

/* cpuid with an explicit subleaf selector (ecx) */
static inline void cpuid_c(uint32_t leaf, uint32_t csel, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
    __asm__ __volatile__(
        "cpuid"
        : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
        : "a"(leaf), "c"(csel));
}
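
/*
 * Example (sketch): reading the 12-byte vendor string from cpuid leaf 0.
 * The string comes back in EBX, EDX, ECX order and is not NUL terminated.
 *
 *   uint32_t a, vendor[3];
 *   cpuid(0, &a, &vendor[0], &vendor[2], &vendor[1]);
 *   // vendor now holds e.g. "GenuineIntel"
 */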

static inline uint64_t read_msr(uint32_t msr_id) {
    uint64_t msr_read_val;
    uint32_t low_val;
    uint32_t high_val;

    __asm__ __volatile__(
        "rdmsr \n\t"
        : "=a"(low_val), "=d"(high_val)
        : "c"(msr_id));

    msr_read_val = high_val;
    msr_read_val = (msr_read_val << 32) | low_val;

    return msr_read_val;
}

static inline void write_msr(uint32_t msr_id, uint64_t msr_write_val) {
    uint32_t low_val = (uint32_t)msr_write_val;
    uint32_t high_val = (uint32_t)(msr_write_val >> 32);

    __asm__ __volatile__(
        "wrmsr \n\t"
        : : "c"(msr_id), "a"(low_val), "d"(high_val));
}

#pragma GCC diagnostic push
/* The dereference of offset in the inline asm below generates this warning in GCC */
#pragma GCC diagnostic ignored "-Warray-bounds"
static inline uint64_t x86_read_gs_offset64(uintptr_t offset) {
    uint64_t ret;
    __asm__("movq %%gs:%1, %0" : "=r"(ret) : "m"(*(uint64_t*)(offset)));
    return ret;
}

static inline void x86_write_gs_offset64(uintptr_t offset, uint64_t val) {
    __asm__("movq %0, %%gs:%1" : : "ir"(val), "m"(*(uint64_t*)(offset)) : "memory");
}

static inline uint32_t x86_read_gs_offset32(uintptr_t offset) {
    uint32_t ret;
    __asm__("movl %%gs:%1, %0" : "=r"(ret) : "m"(*(uint32_t*)(offset)));
    return ret;
}

static inline void x86_write_gs_offset32(uintptr_t offset, uint32_t val) {
    __asm__("movl %0, %%gs:%1" : : "ir"(val), "m"(*(uint32_t*)(offset)) : "memory");
}
#pragma GCC diagnostic pop

/* cannot easily use C generics or C++ templates here, so do it the hard way */
#if __SIZEOF_POINTER__ == 8
static inline void *x86_read_gs_offset_ptr(uintptr_t offset) {
    return (void *)x86_read_gs_offset64(offset);
}
static inline void x86_write_gs_offset_ptr(uintptr_t offset, void *val) {
    x86_write_gs_offset64(offset, (uint64_t)(val));
}
#else
static inline void *x86_read_gs_offset_ptr(uintptr_t offset) {
    return (void *)x86_read_gs_offset32(offset);
}
static inline void x86_write_gs_offset_ptr(uintptr_t offset, void *val) {
    x86_write_gs_offset32(offset, (uint32_t)(val));
}
#endif
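
/*
 * Example (sketch): fetching a per-cpu pointer via the gs-relative
 * accessors. The struct and offset are hypothetical; in practice the
 * offset would come from offsetof() on whatever per-cpu structure gs
 * points at.
 *
 *   struct percpu { void *current_thread; ... };
 *
 *   void *cur = x86_read_gs_offset_ptr(offsetof(struct percpu, current_thread));
 */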

typedef ulong x86_flags_t;

static inline x86_flags_t x86_save_flags(void) {
    x86_flags_t state;

    __asm__ volatile(
        "pushf;"
        "pop %0"
        : "=rm"(state)
        :: "memory");

    return state;
}

static inline void x86_restore_flags(x86_flags_t flags) {
    __asm__ volatile(
        "push %0;"
        "popf"
        :: "g"(flags)
        : "memory", "cc");
}
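
/*
 * Example (sketch): the usual interrupt-save pattern built from these
 * helpers, restoring the previous interrupt state rather than blindly
 * re-enabling.
 *
 *   x86_flags_t state = x86_save_flags();
 *   x86_cli();
 *   ... critical section ...
 *   x86_restore_flags(state);
 */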

static inline void tlbsync_local(vaddr_t address) {
    __asm__ __volatile__("invlpg %0" ::"m"(*(uint8_t *)address));
}
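
/*
 * Example (sketch): after modifying a page table entry, invalidate the
 * stale TLB entry for that page on the local cpu. The PTE update helper
 * is hypothetical; note this does not shoot down other cpus' TLBs.
 *
 *   pte_write(pte, new_value);
 *   tlbsync_local(vaddr);
 */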

void x86_early_init_percpu(void);

__END_CDECLS