1 // Copyright 2016 The Fuchsia Authors
2 // Copyright (c) 2009 Corey Tabaka
3 // Copyright (c) 2015 Intel Corporation
4 // Copyright (c) 2016 Travis Geiselbrecht
5 //
6 // Use of this source code is governed by a MIT-style
7 // license that can be found in the LICENSE file or at
8 // https://opensource.org/licenses/MIT
9 
10 #pragma once
11 
12 #include <cpuid.h>
13 #include <stdbool.h>
14 #include <stdlib.h>
15 #include <sys/types.h>
16 #include <zircon/compiler.h>
17 #include <zircon/types.h>
18 
19 #include <arch/x86/general_regs.h>
20 #include <arch/x86/registers.h>
21 #include <arch/x86/x86intrin.h>
22 #include <syscalls/syscalls.h>
23 
24 __BEGIN_CDECLS
25 
// Mask for the low 32 bits of a value.
// NOTE(review): the name says "8BYTE" but the value is a 4-byte (32-bit)
// mask — presumably historical; confirm before relying on the name.
#define X86_8BYTE_MASK 0xFFFFFFFF
27 
// Interrupt frame layout as it sits on the kernel stack on entry to
// x86_exception_handler()/platform_irq().  Field order mirrors the exact
// push sequence of the assembly stubs and the CPU — do not reorder.
struct x86_64_iframe {
    uint64_t rdi, rsi, rbp, rbx, rdx, rcx, rax;    // pushed by common handler
    uint64_t r8, r9, r10, r11, r12, r13, r14, r15; // pushed by common handler
    uint64_t vector;                               // pushed by stub
    uint64_t err_code;                             // pushed by interrupt or stub
    uint64_t ip, cs, flags;                        // pushed by interrupt
    uint64_t user_sp, user_ss;                     // pushed by interrupt
};
36 
// Convenience aliases for the interrupt frame.
typedef struct x86_64_iframe x86_iframe_t;
typedef struct x86_64_iframe iframe_t;

// C-level entry points invoked from the assembly interrupt stubs.
void x86_exception_handler(x86_iframe_t* frame);
void platform_irq(x86_iframe_t* frame);
42 
// Architecture-specific context handed to generic exception-delivery code.
struct arch_exception_context {
    bool is_page_fault; // true when the exception was a page fault
    x86_iframe_t* frame;
    uint64_t cr2;       // faulting linear address; meaningful only for page faults
};
48 
// Register state layout used by x86_64_context_switch().
// Only callee-saved registers plus the return address are stored; order
// must match the save/restore sequence in the context-switch assembly.
struct x86_64_context_switch_frame {
    uint64_t r15, r14, r13, r12;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rip; // resume address popped by `ret`
};
56 
// Save the current callee-saved state to *oldsp and resume execution from
// the frame at newsp (see x86_64_context_switch_frame).
void x86_64_context_switch(vaddr_t* oldsp, vaddr_t newsp);
// Enter user space at |pc| with stack |sp|, flags |rflags|, and two
// arguments in the first argument registers.  Does not return.
void x86_uspace_entry(uintptr_t arg1, uintptr_t arg2, uintptr_t sp,
                      uintptr_t pc, uint64_t rflags) __NO_RETURN;

// Assembly syscall entry point.
void x86_syscall(void);

// NOTE(review): presumably invoked on the syscall return path to handle
// pending signals — verify against the caller.
void x86_syscall_process_pending_signals(x86_syscall_general_regs_t* gregs);
64 
/* @brief Register all of the CPUs in the system
 *
 * Must be called only once.
 *
 * @param apic_ids A list of all APIC IDs in the system.  The BP should be in
 *        the list.
 * @param num_cpus The number of entries in the apic_ids list.
 */
void x86_init_smp(uint32_t* apic_ids, uint32_t num_cpus);
74 
/* @brief Bring all of the specified APs up and hand them over to the kernel
 *
 * This function must not be called before x86_init_smp.
 *
 * May be called by any running CPU.  Due to requiring use of the very limited
 * low 1MB of memory, this function is not re-entrant.  It should not be
 * executed more than once concurrently.
 *
 * @param apic_ids A list of all APIC IDs to launch.
 * @param count The number of entries in the apic_ids list.
 *
 * @return ZX_ERR_INVALID_ARGS if an unknown APIC ID was provided.
 * @return ZX_ERR_BAD_STATE if one of the targets is currently online.
 * @return ZX_ERR_TIMED_OUT if one of the targets failed to launch.
 */
zx_status_t x86_bringup_aps(uint32_t* apic_ids, uint32_t count);
91 
// Size of the TSS I/O permission bitmap (one bit per I/O port).
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS / 8)
// NOTE(review): this divides the bit count by sizeof(long) in BYTES, not
// bits — on LP64 it yields 8192, not the 1024 longs needed to hold the
// bitmap.  Confirm the intended meaning at the call sites before use.
#define IO_BITMAP_LONGS (IO_BITMAP_BITS / sizeof(long))

/*
 * Assignment of Interrupt Stack Table entries
 */
#define NUM_ASSIGNED_IST_ENTRIES 3
#define NMI_IST_INDEX 1
#define MCE_IST_INDEX 2
#define DBF_IST_INDEX 3
103 
/*
 * x86-64 TSS structure
 *
 * Hardware-defined layout (packed); field order and sizes must not change.
 */
typedef struct {
    uint32_t rsvd0;
    uint64_t rsp0;      // stack pointer loaded on transition to ring 0
    uint64_t rsp1;      // ring 1 stack pointer
    uint64_t rsp2;      // ring 2 stack pointer
    uint32_t rsvd1;
    uint32_t rsvd2;
    uint64_t ist1;      // Interrupt Stack Table entries 1-7 (see *_IST_INDEX)
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint32_t rsvd3;
    uint32_t rsvd4;
    uint16_t rsvd5;
    uint16_t iomap_base; // offset of the I/O permission bitmap from the TSS base

    // One bit per I/O port, plus the trailing 0xFF terminator byte.
    uint8_t tss_bitmap[IO_BITMAP_BYTES + 1];
} __PACKED tss_64_t;

typedef tss_64_t tss_t;
130 
// Clear CR0.TS (task-switched flag).
static inline void x86_clts(void) {
    __asm__ volatile("clts");
}
// Halt the CPU until the next interrupt arrives.
static inline void x86_hlt(void) {
    __asm__ volatile("hlt");
}
// Enable maskable interrupts on this CPU.
static inline void x86_sti(void) {
    __asm__ volatile("sti");
}
// Disable maskable interrupts on this CPU.
static inline void x86_cli(void) {
    __asm__ volatile("cli");
}
// Load the task register with the given TSS selector.
static inline void x86_ltr(uint16_t sel) {
    __asm__ volatile("ltr %%ax" ::"a"(sel));
}
// Load the IDT register from the descriptor at |base|.
static inline void x86_lidt(uintptr_t base) {
    __asm__ volatile("lidt (%0)" ::"r"(base) : "memory");
}
// Load the GDT register from the descriptor at |base|.
// (Reformatted for consistency with x86_lidt: same-line brace and the
// __asm__/__volatile__ spelling used throughout this file.)
static inline void x86_lgdt(uintptr_t base) {
    __asm__ __volatile__("lgdt (%0)" ::"r"(base)
                         : "memory");
}
154 
// Read one byte from I/O port |_port|.
static inline uint8_t inp(uint16_t _port) {
    uint8_t value;
    __asm__ __volatile__("inb %1, %0" : "=a"(value) : "dN"(_port));
    return value;
}
162 
// Read one 16-bit word from I/O port |_port|.
static inline uint16_t inpw(uint16_t _port) {
    uint16_t value;
    __asm__ __volatile__("inw %1, %0" : "=a"(value) : "dN"(_port));
    return value;
}
170 
// Read one 32-bit doubleword from I/O port |_port|.
static inline uint32_t inpd(uint16_t _port) {
    uint32_t value;
    __asm__ __volatile__("inl %1, %0" : "=a"(value) : "dN"(_port));
    return value;
}
178 
// Write one byte to I/O port |_port|.
static inline void outp(uint16_t _port, uint8_t _data) {
    __asm__ __volatile__("outb %1, %0" : : "dN"(_port), "a"(_data));
}
185 
// Write one 16-bit word to I/O port |_port|.
static inline void outpw(uint16_t _port, uint16_t _data) {
    __asm__ __volatile__("outw %1, %0" : : "dN"(_port), "a"(_data));
}
192 
// Write one 32-bit doubleword to I/O port |_port|.
static inline void outpd(uint16_t _port, uint32_t _data) {
    __asm__ __volatile__("outl %1, %0" : : "dN"(_port), "a"(_data));
}
199 
// Read the CPU's time-stamp counter via the compiler intrinsic.
static inline uint64_t rdtsc(void) {
    return __rdtsc();
}
203 
// Execute CPUID for leaf |sel|; *a..*d receive EAX..EDX respectively.
static inline void cpuid(uint32_t sel, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
    __cpuid(sel, *a, *b, *c, *d);
}
207 
/* cpuid wrapper with ecx set to a second argument */
// Execute CPUID for leaf |sel| with subleaf |sel_c| in ECX; *a..*d receive
// EAX..EDX respectively.
static inline void cpuid_c(uint32_t sel, uint32_t sel_c, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
    __cpuid_count(sel, sel_c, *a, *b, *c, *d);
}
212 
// Set the bits of |mask| in CR0 (read-modify-write).
// Fix: %0 is written by the first instruction before %1 is consumed, so the
// output needs the '&' early-clobber modifier — without it the compiler may
// allocate the same register for temp and mask, destroying the mask.  Also
// declare "cc" since `or` modifies the flags.
static inline void set_in_cr0(ulong mask) {
    ulong temp;

    __asm__ __volatile__(
        "mov %%cr0, %0  \n\t"
        "or %1, %0      \n\t"
        "mov %0, %%cr0   \n\t"
        : "=&r"(temp)
        : "irg"(mask)
        : "cc");
}
224 
// Clear the bits of |mask| in CR0 (read-modify-write).
// Fix: %0 is written before input %1 is consumed, so the output needs the
// '&' early-clobber modifier to prevent the compiler from sharing a
// register between temp and ~mask.  Also declare "cc" since `and` modifies
// the flags.
static inline void clear_in_cr0(ulong mask) {
    ulong temp;

    __asm__ __volatile__(
        "mov %%cr0, %0  \n\t"
        "and %1, %0     \n\t"
        "mov %0, %%cr0  \n\t"
        : "=&r"(temp)
        : "irg"(~mask)
        : "cc");
}
236 
x86_get_cr2(void)237 static inline ulong x86_get_cr2(void) {
238     ulong rv;
239 
240     __asm__ __volatile__(
241         "mov %%cr2, %0"
242         : "=r"(rv));
243 
244     return rv;
245 }
246 
x86_get_cr3(void)247 static inline ulong x86_get_cr3(void) {
248     ulong rv;
249 
250     __asm__ __volatile__(
251         "mov %%cr3, %0"
252         : "=r"(rv));
253     return rv;
254 }
255 
// Write CR3, switching to the page-table hierarchy at |in_val|.
static inline void x86_set_cr3(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr3 \n\t" : : "r"(in_val));
}
262 
x86_get_cr0(void)263 static inline ulong x86_get_cr0(void) {
264     ulong rv;
265 
266     __asm__ __volatile__(
267         "mov %%cr0, %0 \n\t"
268         : "=r"(rv));
269     return rv;
270 }
271 
x86_get_cr4(void)272 static inline ulong x86_get_cr4(void) {
273     ulong rv;
274 
275     __asm__ __volatile__(
276         "mov %%cr4, %0 \n\t"
277         : "=r"(rv));
278     return rv;
279 }
280 
// Write CR0.
static inline void x86_set_cr0(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr0 \n\t" : : "r"(in_val));
}
287 
// Write CR4.
static inline void x86_set_cr4(ulong in_val) {
    __asm__ __volatile__("mov %0,%%cr4 \n\t" : : "r"(in_val));
}
294 
// Generate set_<reg>()/get_<reg>() accessors for a 16-bit segment register.
#define DEFINE_REGISTER_ACCESSOR(REG)              \
    static inline void set_##REG(uint16_t value) { \
        __asm__ volatile("mov %0, %%" #REG         \
                         :                         \
                         : "r"(value));            \
    }                                              \
    static inline uint16_t get_##REG(void) {       \
        uint16_t value;                            \
        __asm__ volatile("mov %%" #REG ", %0"      \
                         : "=r"(value));           \
        return value;                              \
    }

DEFINE_REGISTER_ACCESSOR(ds)
DEFINE_REGISTER_ACCESSOR(es)
DEFINE_REGISTER_ACCESSOR(fs)
DEFINE_REGISTER_ACCESSOR(gs)

#undef DEFINE_REGISTER_ACCESSOR
314 
// Read the 64-bit MSR |msr_id|.  RDMSR returns the value split across
// EDX:EAX.
static inline uint64_t read_msr(uint32_t msr_id) {
    uint32_t lo, hi;
    __asm__ __volatile__(
        "rdmsr \n\t"
        : "=a"(lo), "=d"(hi)
        : "c"(msr_id));
    return ((uint64_t)hi << 32) | lo;
}
326 
// Read only the low 32 bits of MSR |msr_id|.  RDMSR also writes EDX,
// hence the "rdx" clobber.
static inline uint32_t read_msr32(uint32_t msr_id) {
    uint32_t lo;
    __asm__ __volatile__(
        "rdmsr \n\t"
        : "=a"(lo)
        : "c"(msr_id)
        : "rdx");
    return lo;
}
338 
// Read MSR |msr_id| into *val, recovering from a #GP fault on an invalid
// MSR instead of crashing; returns a zx_status_t error on failure.
zx_status_t read_msr_safe(uint32_t msr_id, uint64_t* val);
340 
// Write the 64-bit value |msr_write_val| to MSR |msr_id|.  WRMSR takes the
// value split across EDX:EAX.
static inline void write_msr(uint32_t msr_id, uint64_t msr_write_val) {
    uint32_t lo = (uint32_t)(msr_write_val & 0xffffffff);
    uint32_t hi = (uint32_t)(msr_write_val >> 32);
    __asm__ __volatile__(
        "wrmsr \n\t"
        :
        : "c"(msr_id), "a"(lo), "d"(hi));
}
347 
x86_is_paging_enabled(void)348 static inline bool x86_is_paging_enabled(void) {
349     if (x86_get_cr0() & X86_CR0_PG)
350         return true;
351 
352     return false;
353 }
354 
x86_is_PAE_enabled(void)355 static inline bool x86_is_PAE_enabled(void) {
356     if (x86_is_paging_enabled() == false)
357         return false;
358 
359     if (!(x86_get_cr4() & X86_CR4_PAE))
360         return false;
361 
362     return true;
363 }
364 
// Load the 64-bit value at gs:|offset| (offset relative to the gs base).
static inline uint64_t x86_read_gs_offset64(uintptr_t offset) {
    uint64_t result;
    __asm__("movq  %%gs:%1, %0"
            : "=r"(result)
            : "m"(*(uint64_t*)(offset)));
    return result;
}
372 
// Store the 64-bit value |val| at gs:|offset| (offset relative to the gs base).
static inline void x86_write_gs_offset64(uintptr_t offset, uint64_t val) {
    __asm__("movq  %0, %%gs:%1"
            :
            : "ir"(val), "m"(*(uint64_t*)(offset))
            : "memory");
}
379 
// Load the 32-bit value at gs:|offset| (offset relative to the gs base).
static inline uint32_t x86_read_gs_offset32(uintptr_t offset) {
    uint32_t result;
    __asm__("movl  %%gs:%1, %0"
            : "=r"(result)
            : "m"(*(uint32_t*)(offset)));
    return result;
}
387 
// Store the 32-bit value |val| at gs:|offset| (offset relative to the gs base).
static inline void x86_write_gs_offset32(uintptr_t offset, uint32_t val) {
    __asm__("movl   %0, %%gs:%1"
            :
            : "ir"(val), "m"(*(uint32_t*)(offset))
            : "memory");
}
394 
// Saved RFLAGS image as produced by x86_save_flags().
typedef uint64_t x86_flags_t;
396 
// Capture the current RFLAGS register via pushfq/popq.
static inline uint64_t x86_save_flags(void) {
    uint64_t flags;
    __asm__ volatile(
        "pushfq;"
        "popq %0"
        : "=rm"(flags)::"memory");
    return flags;
}
407 
// Reload RFLAGS from a value previously captured by x86_save_flags().
static inline void x86_restore_flags(uint64_t flags) {
    __asm__ volatile(
        "pushq %0;"
        "popfq" ::"g"(flags)
        : "memory", "cc");
}
414 
// Read |_reads| bytes from port |_port| into |_buffer| with interrupts
// disabled around the transfer.
// Fix: `rep insb` advances RDI, decrements RCX, and writes the buffer, but
// the old asm declared those as input-only with no "memory" clobber, so the
// compiler could assume them unchanged.  Make them read-write operands and
// clobber memory.
static inline void inprep(uint16_t _port, uint8_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__("pushfq \n\t"
                         "cli \n\t"
                         "cld \n\t"
                         "rep insb \n\t"
                         "popfq \n\t"
                         : "+D"(_buffer), "+c"(_reads)
                         : "d"(_port)
                         : "memory");
}
426 
// Write |_writes| bytes from |_buffer| to port |_port| with interrupts
// disabled around the transfer.
// Fix: `rep outsb` advances RSI and decrements RCX, but the old asm declared
// them input-only, so the compiler could assume them unchanged; "memory"
// forces pending stores to the buffer to be visible to the asm.
static inline void outprep(uint16_t _port, uint8_t* _buffer, uint32_t _writes) {
    __asm__ __volatile__("pushfq \n\t"
                         "cli \n\t"
                         "cld \n\t"
                         "rep outsb \n\t"
                         "popfq \n\t"
                         : "+S"(_buffer), "+c"(_writes)
                         : "d"(_port)
                         : "memory");
}
438 
// Read |_reads| 16-bit words from port |_port| into |_buffer| with
// interrupts disabled around the transfer.
// Fix: `rep insw` advances RDI, decrements RCX, and writes the buffer;
// declare those operands read-write and clobber memory (the old asm left
// them input-only).
static inline void inpwrep(uint16_t _port, uint16_t* _buffer, uint32_t _reads) {
    __asm__ __volatile__("pushfq \n\t"
                         "cli \n\t"
                         "cld \n\t"
                         "rep insw \n\t"
                         "popfq \n\t"
                         : "+D"(_buffer), "+c"(_reads)
                         : "d"(_port)
                         : "memory");
}
450 
// Write |_writes| 16-bit words from |_buffer| to port |_port| with
// interrupts disabled around the transfer.
// Fix: `rep outsw` advances RSI and decrements RCX; declare those operands
// read-write and clobber memory (the old asm left them input-only).
static inline void outpwrep(uint16_t _port, uint16_t* _buffer,
                            uint32_t _writes) {
    __asm__ __volatile__("pushfq \n\t"
                         "cli \n\t"
                         "cld \n\t"
                         "rep outsw \n\t"
                         "popfq \n\t"
                         : "+S"(_buffer), "+c"(_writes)
                         : "d"(_port)
                         : "memory");
}
463 
// Read |_reads| 32-bit doublewords from port |_port| into |_buffer| with
// interrupts disabled around the transfer.
// Fix: `rep insl` advances RDI, decrements RCX, and writes the buffer;
// declare those operands read-write and clobber memory (the old asm left
// them input-only).
static inline void inpdrep(uint16_t _port, uint32_t* _buffer,
                           uint32_t _reads) {
    __asm__ __volatile__("pushfq \n\t"
                         "cli \n\t"
                         "cld \n\t"
                         "rep insl \n\t"
                         "popfq \n\t"
                         : "+D"(_buffer), "+c"(_reads)
                         : "d"(_port)
                         : "memory");
}
476 
// Write |_writes| 32-bit doublewords from |_buffer| to port |_port| with
// interrupts disabled around the transfer.
// Fix: `rep outsl` advances RSI and decrements RCX; declare those operands
// read-write and clobber memory (the old asm left them input-only).
static inline void outpdrep(uint16_t _port, uint32_t* _buffer,
                            uint32_t _writes) {
    __asm__ __volatile__("pushfq \n\t"
                         "cli \n\t"
                         "cld \n\t"
                         "rep outsl \n\t"
                         "popfq \n\t"
                         : "+S"(_buffer), "+c"(_writes)
                         : "d"(_port)
                         : "memory");
}
489 
// Arm a MONITOR watch on |addr| (for use with x86_mwait).
void x86_monitor(volatile void* addr);
// Wait (MWAIT) until the monitored address is written or an event occurs.
void x86_mwait(void);
// Platform idle loop for this CPU.
void x86_idle(void);
493 
494 __END_CDECLS
495