#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/bitops.h>
#include <xen/bug.h>
#include <asm/cpufeature.h>
#include <asm/x86-defns.h>

static inline void wbinvd(void)
{
    asm volatile ( "wbinvd" ::: "memory" );
}

static inline void wbnoinvd(void)
{
    asm volatile ( "repe; wbinvd" : : : "memory" );
}

static inline void clflush(const void *p)
{
    asm volatile ( "clflush %0" :: "m" (*(const char *)p) );
}

static inline void clflushopt(const void *p)
{
    asm volatile ( "data16 clflush %0" :: "m" (*(const char *)p) );
}

static inline void clwb(const void *p)
{
#if defined(HAVE_AS_CLWB)
    asm volatile ( "clwb %0" :: "m" (*(const char *)p) );
#elif defined(HAVE_AS_XSAVEOPT)
    asm volatile ( "data16 xsaveopt %0" :: "m" (*(const char *)p) );
#else
    asm volatile ( ".byte 0x66, 0x0f, 0xae, 0x32"
                   :: "d" (p), "m" (*(const char *)p) );
#endif
}
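
/*
 * Illustrative sketch (not part of the Xen interface): writing back a buffer
 * one cache line at a time with clwb(), then ordering the writebacks with
 * wmb() (sfence).  The 64-byte line size and the flush_range() name are
 * assumptions made for the example only.
 *
 *     static void flush_range(const void *p, size_t len)
 *     {
 *         const char *va = (const char *)((unsigned long)p & ~63UL);
 *         const char *end = (const char *)p + len;
 *
 *         for ( ; va < end; va += 64 )
 *             clwb(va);
 *         wmb();
 *     }
 */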

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#include <asm/x86_64/system.h>

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
 *   strictly speaking *ptr is an output argument of the primitive. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "xchg %b[x], %[ptr]"
                       : [x] "+q" (x), [ptr] "+m" (*(volatile uint8_t *)ptr)
                       :: "memory" );
        break;
    case 2:
        asm volatile ( "xchg %w[x], %[ptr]"
                       : [x] "+r" (x), [ptr] "+m" (*(volatile uint16_t *)ptr)
                       :: "memory" );
        break;
    case 4:
        asm volatile ( "xchg %k[x], %[ptr]"
                       : [x] "+r" (x), [ptr] "+m" (*(volatile uint32_t *)ptr)
                       :: "memory" );
        break;
    case 8:
        asm volatile ( "xchg %q[x], %[ptr]"
                       : [x] "+r" (x), [ptr] "+m" (*(volatile uint64_t *)ptr)
                       :: "memory" );
        break;
    }
    return x;
}
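
/*
 * Illustrative sketch: xchg() above returns the previous value and is a full
 * barrier, which makes it a convenient read-and-clear primitive.  The
 * claim_pending_work()/pending names are made up for the example.
 *
 *     static bool claim_pending_work(unsigned int *pending)
 *     {
 *         // Only one CPU observes a non-zero previous value.
 *         return xchg(pending, 0) != 0;
 *     }
 */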

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch ( size )
    {
    case 1:
        asm volatile ( "lock cmpxchg %b[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint8_t *)ptr)
                       : [new] "q" (new), "a" (old)
                       : "memory" );
        return prev;
    case 2:
        asm volatile ( "lock cmpxchg %w[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint16_t *)ptr)
                       : [new] "r" (new), "a" (old)
                       : "memory" );
        return prev;
    case 4:
        asm volatile ( "lock cmpxchg %k[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint32_t *)ptr)
                       : [new] "r" (new), "a" (old)
                       : "memory" );
        return prev;
    case 8:
        asm volatile ( "lock cmpxchg %q[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint64_t *)ptr)
                       : [new] "r" (new), "a" (old)
                       : "memory" );
        return prev;
    }
    return old;
}
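
/*
 * Illustrative sketch: a lock-free read-modify-write loop built on the
 * compare-and-exchange primitive above, via the size-dispatching cmpxchg()
 * wrapper from the included <asm/x86_64/system.h>.  The function name and
 * saturation limit are assumptions for the example.
 *
 *     static void saturating_inc(unsigned long *counter, unsigned long max)
 *     {
 *         unsigned long old, new;
 *
 *         do {
 *             old = ACCESS_ONCE(*counter);
 *             if ( old >= max )
 *                 return;              // already saturated
 *             new = old + 1;
 *         } while ( cmpxchg(counter, old, new) != old );
 *     }
 */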

static always_inline unsigned long cmpxchg_local_(
    void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
    unsigned long prev = ~old;

    switch ( size )
    {
    case 1:
        asm volatile ( "cmpxchg %b[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint8_t *)ptr)
                       : [new] "q" (new), "a" (old) );
        break;
    case 2:
        asm volatile ( "cmpxchg %w[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint16_t *)ptr)
                       : [new] "r" (new), "a" (old) );
        break;
    case 4:
        asm volatile ( "cmpxchg %k[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint32_t *)ptr)
                       : [new] "r" (new), "a" (old) );
        break;
    case 8:
        asm volatile ( "cmpxchg %q[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint64_t *)ptr)
                       : [new] "r" (new), "a" (old) );
        break;
    }

    return prev;
}
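
/*
 * Illustrative sketch: cmpxchg_local_() omits the lock prefix, so it is only
 * atomic with respect to the local CPU (e.g. against an interrupt handler on
 * the same CPU) and must not be used on data shared with other CPUs.  The
 * state values and pointer below are hypothetical.
 *
 *     // Advance a per-CPU state machine, failing if an interrupt handler
 *     // changed the state underneath us.
 *     static bool try_transition(unsigned int *this_cpu_state)
 *     {
 *         return cmpxchg_local_(this_cpu_state, STATE_IDLE, STATE_BUSY,
 *                               sizeof(*this_cpu_state)) == STATE_IDLE;
 *     }
 */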

/*
 * Undefined symbol to cause link failure if a wrong size is used with
 * arch_fetch_and_add().
 */
extern unsigned long __bad_fetch_and_add_size(void);

static always_inline unsigned long __xadd(
    volatile void *ptr, unsigned long v, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "lock xadd %b[v], %[ptr]"
                       : [v] "+q" (v), [ptr] "+m" (*(volatile uint8_t *)ptr)
                       :: "memory" );
        return v;
    case 2:
        asm volatile ( "lock xadd %w[v], %[ptr]"
                       : [v] "+r" (v), [ptr] "+m" (*(volatile uint16_t *)ptr)
                       :: "memory" );
        return v;
    case 4:
        asm volatile ( "lock xadd %k[v], %[ptr]"
                       : [v] "+r" (v), [ptr] "+m" (*(volatile uint32_t *)ptr)
                       :: "memory" );
        return v;
    case 8:
        asm volatile ( "lock xadd %q[v], %[ptr]"
                       : [v] "+r" (v), [ptr] "+m" (*(volatile uint64_t *)ptr)
                       :: "memory" );
        return v;
    default:
        return __bad_fetch_and_add_size();
    }
}

/*
 * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr.  Returns
 * the previous value.
 *
 * This is a full memory barrier.
 */
#define arch_fetch_and_add(ptr, v) \
    ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
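
/*
 * Illustrative sketch: handing out monotonically increasing ticket numbers.
 * arch_fetch_and_add() returns the value before the addition; the
 * take_ticket()/next_ticket names are made up for the example.
 *
 *     static unsigned int take_ticket(unsigned int *next_ticket)
 *     {
 *         return arch_fetch_and_add(next_ticket, 1);
 *     }
 */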

/*
 * Mandatory barriers, for enforced ordering of reads and writes, e.g. for use
 * with MMIO devices mapped with reduced cacheability.
 */
#define mb()            asm volatile ( "mfence" ::: "memory" )
#define rmb()           asm volatile ( "lfence" ::: "memory" )
#define wmb()           asm volatile ( "sfence" ::: "memory" )

/*
 * SMP barriers, for ordering of reads and writes between CPUs, most commonly
 * used with shared memory.
 *
 * Both Intel and AMD agree that, from a programmer's viewpoint:
 *  Loads cannot be reordered relative to other loads.
 *  Stores cannot be reordered relative to other stores.
 *  Loads may be reordered ahead of unaliasing stores.
 *
 * Refer to the vendor system programming manuals for further details.
 */
#define smp_mb()        asm volatile ( "lock addl $0, -4(%%rsp)" ::: "memory" )
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
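
/*
 * Illustrative sketch: the usual smp_wmb()/smp_rmb() pairing across a shared
 * buffer and a ready flag (names are made up for the example).
 *
 *     // Producer:                       // Consumer:
 *     shared_data = val;                 while ( !shared_ready )
 *     smp_wmb();                             cpu_relax();
 *     shared_ready = 1;                  smp_rmb();
 *                                        consume(shared_data);
 *
 * On x86 both of these compile to a plain compiler barrier(), which is
 * sufficient given the ordering rules listed above.
 */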

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)

#define smp_mb__before_atomic()    do { } while (0)
#define smp_mb__after_atomic()     do { } while (0)

#define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
#define local_irq_enable()      asm volatile ( "sti" : : : "memory" )

/* used in the idle loop; sti only takes effect after the next instruction,
 * so no wakeup interrupt can be lost between enabling interrupts and hlt */
#define safe_halt()     asm volatile ( "sti; hlt" : : : "memory" )
/* used when interrupts are already enabled or to shutdown the processor */
#define halt()          asm volatile ( "hlt" : : : "memory" )

#define local_save_flags(x)                                      \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
})
#define local_irq_save(x)                                        \
({                                                               \
    local_save_flags(x);                                         \
    local_irq_disable();                                         \
})
#define local_irq_restore(x)                                     \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "pushfq\n\t"                                  \
                   "andq %0, (%%rsp)\n\t"                        \
                   "orq  %1, (%%rsp)\n\t"                        \
                   "popfq"                                       \
                   : : "i?r" ( ~X86_EFLAGS_IF ),                 \
                       "ri" ( (x) & X86_EFLAGS_IF ) );           \
})
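
/*
 * Illustrative sketch: a short critical section that must not race with the
 * local CPU's interrupt handlers.  The flags variable must be an unsigned
 * long, as enforced by the BUILD_BUG_ON()s above; update_local_state() is a
 * made-up name.
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);       // save EFLAGS, then cli
 *     update_local_state();
 *     local_irq_restore(flags);    // restore only the saved interrupt flag
 */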

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    local_save_flags(flags);
    return !!(flags & X86_EFLAGS_IF);
}

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

void trap_init(void);
void init_idt_traps(void);
void load_system_tables(void);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);

#endif