/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#pragma once

#include <lk/compiler.h>

#ifndef ASSEMBLY

#include <arch/x86.h>

/* override of some routines */
static inline void arch_enable_ints(void) {
    CF;     /* compiler fence (from lk/compiler.h): keep the compiler from reordering accesses across the state change */
    __asm__ volatile("sti");
}

static inline void arch_disable_ints(void) {
    __asm__ volatile("cli");
    CF;
}

static inline bool arch_ints_disabled(void) {
    x86_flags_t state;

    __asm__ volatile(
#if ARCH_X86_32
        "pushfl;"
        "popl %%eax"
#elif ARCH_X86_64
        "pushfq;"
        "popq %%rax"
#endif
        : "=a" (state)
        :: "memory");

    /* IF is bit 9 of (E/R)FLAGS; interrupts are disabled when it is clear */
    return !(state & (1<<9));
}
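
/*
 * Usage sketch (hypothetical caller): save, mask, and conditionally restore
 * the interrupt state around a critical section.
 *
 *   bool was_disabled = arch_ints_disabled();
 *   arch_disable_ints();
 *   // ... touch state shared with interrupt handlers ...
 *   if (!was_disabled)
 *       arch_enable_ints();
 */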

/* rdtsc was introduced with the Pentium; X86_LEGACY builds target older
 * hardware without a TSC, so return 0 there. Note the 64-bit counter is
 * truncated to ulong on 32-bit builds.
 */
static inline ulong arch_cycle_count(void) {
#if X86_LEGACY
    return 0;
#else
    return __builtin_ia32_rdtsc();
#endif
}
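
/*
 * Usage sketch (hypothetical do_work()): time a code sequence in cycles;
 * on X86_LEGACY builds this always yields 0.
 *
 *   ulong start = arch_cycle_count();
 *   do_work();
 *   ulong cycles = arch_cycle_count() - start;
 */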

#if WITH_SMP
#include <arch/x86/mp.h>
/* with SMP the current thread pointer is tracked per cpu (see arch/x86/mp.h) */
static inline struct thread *arch_get_current_thread(void) {
    return x86_get_current_thread();
}

static inline void arch_set_current_thread(struct thread *t) {
    x86_set_current_thread(t);
}

static inline uint arch_curr_cpu_num(void) {
    return x86_get_cpu_num();
}
#else
/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

static inline struct thread *arch_get_current_thread(void) {
    return _current_thread;
}

static inline void arch_set_current_thread(struct thread *t) {
    _current_thread = t;
}

static inline uint arch_curr_cpu_num(void) {
    return 0;
}
#endif
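
/*
 * Usage sketch (hypothetical): the scheduler would pair these during a
 * context switch; other code typically only reads:
 *
 *   struct thread *t = arch_get_current_thread();
 */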

#if ARCH_X86_64
// mfence/lfence/sfence rely on SSE2, which is architecturally guaranteed on x86-64
#define mb()        __asm__ volatile("mfence" : : : "memory")
#define rmb()       __asm__ volatile("lfence" : : : "memory")
#define wmb()       __asm__ volatile("sfence" : : : "memory")
#else
// Use a locked add to the top of the stack as a full load/store barrier.
// Cannot rely on SSE2 being available on older i386 class hardware.
#define __storeload_barrier \
    __asm__ volatile("lock; addl $0, (%%esp)" : : : "memory", "cc")
#define mb()        __storeload_barrier
#define rmb()       __storeload_barrier
#define wmb()       __storeload_barrier
#endif

#if WITH_SMP
// XXX probably too strict
#define smp_mb()    mb()
#define smp_rmb()   rmb()
#define smp_wmb()   wmb()
#else
#define smp_mb()    CF
#define smp_wmb()   CF
#define smp_rmb()   CF
#endif
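
/*
 * Usage sketch (hypothetical producer/consumer pair) for the barriers above:
 *
 *   // producer                          // consumer
 *   data = value;                        while (!flag) ;
 *   smp_wmb();                           smp_rmb();
 *   flag = 1;                            use(data);
 *
 * smp_wmb() orders the data store before the flag store; smp_rmb() orders
 * the flag load before the data load. On non-SMP builds these reduce to a
 * plain compiler fence (CF).
 */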

#endif // !ASSEMBLY