#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/lib.h>
#include <xen/bitops.h>
#include <asm/processor.h>

#define read_sreg(name) \
({  unsigned int __sel; \
    asm volatile ( "mov %%" STR(name) ",%0" : "=r" (__sel) ); \
    __sel; \
})
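
/*
 * Illustrative sketch: example_read_cs() is a hypothetical helper showing
 * how read_sreg() is meant to be used; the macro evaluates to the selector
 * currently loaded in the named segment register.
 */
static inline unsigned int example_read_cs(void)
{
    /* e.g. the low two bits of the %cs selector give the current CPL */
    return read_sreg(cs);
}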

#define wbinvd() \
    asm volatile ( "wbinvd" : : : "memory" )

#define clflush(a) \
    asm volatile ( "clflush (%0)" : : "r"(a) )

#define nop() \
    asm volatile ( "nop" )

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#include <asm/x86_64/system.h>

/*
 * Note: no "lock" prefix is needed, even on SMP: xchg is implicitly
 * locked anyway.
 * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
 * strictly speaking the constraints below are imprecise, since *ptr is
 * really an output operand as well. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "xchgb %b0,%1"
                       : "=q" (x)
                       : "m" (*__xg(ptr)), "0" (x)
                       : "memory" );
        break;
    case 2:
        asm volatile ( "xchgw %w0,%1"
                       : "=r" (x)
                       : "m" (*__xg(ptr)), "0" (x)
                       : "memory" );
        break;
    case 4:
        asm volatile ( "xchgl %k0,%1"
                       : "=r" (x)
                       : "m" (*__xg(ptr)), "0" (x)
                       : "memory" );
        break;
    case 8:
        asm volatile ( "xchgq %0,%1"
                       : "=r" (x)
                       : "m" (*__xg(ptr)), "0" (x)
                       : "memory" );
        break;
    }
    return x;
}
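
/*
 * Illustrative sketch: example_take_pending() is a hypothetical helper
 * showing a typical use of xchg(): atomically fetch a pending-work word
 * while leaving zero behind, so no update is lost or consumed twice.
 */
static inline unsigned long example_take_pending(unsigned long *pending)
{
    /* Swap in zero and return whatever was there before, in one atomic step. */
    return xchg(pending, 0UL);
}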

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch ( size )
    {
    case 1:
        asm volatile ( "lock; cmpxchgb %b1,%2"
                       : "=a" (prev)
                       : "q" (new), "m" (*__xg(ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 2:
        asm volatile ( "lock; cmpxchgw %w1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg(ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 4:
        asm volatile ( "lock; cmpxchgl %k1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg(ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 8:
        asm volatile ( "lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg(ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    }
    return old;
}

#define cmpxchgptr(ptr,o,n) ({ \
    const __typeof__(**(ptr)) *__o = (o); \
    __typeof__(**(ptr)) *__n = (n); \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)__o, \
                                   (unsigned long)__n,sizeof(*(ptr)))); \
})
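
/*
 * Illustrative sketch: example_install_once() is a hypothetical helper
 * showing a typical use of cmpxchgptr(): install a pointer only if the
 * slot is still empty, detecting who won a race by comparing the returned
 * old value with the expected one.
 */
static inline int example_install_once(void **slot, void *new)
{
    /* Returns 1 only if *slot was still NULL and now points at @new. */
    return cmpxchgptr(slot, NULL, new) == NULL;
}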

/*
 * Undefined symbol to cause link failure if a wrong size is used with
 * arch_fetch_and_add().
 */
extern unsigned long __bad_fetch_and_add_size(void);

static always_inline unsigned long __xadd(
    volatile void *ptr, unsigned long v, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "lock; xaddb %b0,%1"
                       : "+r" (v), "+m" (*__xg(ptr))
                       :: "memory");
        return v;
    case 2:
        asm volatile ( "lock; xaddw %w0,%1"
                       : "+r" (v), "+m" (*__xg(ptr))
                       :: "memory");
        return v;
    case 4:
        asm volatile ( "lock; xaddl %k0,%1"
                       : "+r" (v), "+m" (*__xg(ptr))
                       :: "memory");
        return v;
    case 8:
        asm volatile ( "lock; xaddq %q0,%1"
                       : "+r" (v), "+m" (*__xg(ptr))
                       :: "memory");
        return v;
    default:
        return __bad_fetch_and_add_size();
    }
}

/*
 * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr. Returns
 * the previous value.
 *
 * This is a full memory barrier.
 */
#define arch_fetch_and_add(ptr, v) \
    ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
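
/*
 * Illustrative sketch: example_next_ticket() is a hypothetical helper built
 * on arch_fetch_and_add(); since the macro returns the value seen before
 * the addition, concurrent callers each receive a distinct ticket.
 */
static inline unsigned int example_next_ticket(unsigned int *counter)
{
    /* Full barrier; every caller observes a different previous value. */
    return arch_fetch_and_add(counter, 1);
}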

/*
 * Both Intel and AMD agree that, from a programmer's viewpoint:
 *  Loads cannot be reordered relative to other loads.
 *  Stores cannot be reordered relative to other stores.
 *
 * Intel64 Architecture Memory Ordering White Paper
 * <http://developer.intel.com/products/processor/manuals/318147.pdf>
 *
 * AMD64 Architecture Programmer's Manual, Volume 2: System Programming
 * <http://www.amd.com/us-en/assets/content_type/\
 *  white_papers_and_tech_docs/24593.pdf>
 */
#define rmb() barrier()
#define wmb() barrier()

#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
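
/*
 * Illustrative sketch: example_mailbox and its helpers are hypothetical and
 * merely show how the barriers above are typically paired; the producer
 * orders its data store before the flag store, the consumer orders the flag
 * check before the data load. On x86 these are compiler barriers only.
 */
struct example_mailbox {
    unsigned long data;
    unsigned long ready;
};

static inline void example_post(struct example_mailbox *m, unsigned long v)
{
    m->data = v;
    smp_wmb();        /* make data visible before setting the flag */
    m->ready = 1;
}

static inline int example_poll(struct example_mailbox *m, unsigned long *out)
{
    if ( !m->ready )
        return 0;
    smp_rmb();        /* check the flag before reading the data */
    *out = m->data;
    return 1;
}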

#define local_irq_disable() asm volatile ( "cli" : : : "memory" )
#define local_irq_enable() asm volatile ( "sti" : : : "memory" )

/*
 * Used in the idle loop; sti only takes effect after the next instruction,
 * so no interrupt can be delivered between the sti and the hlt.
 */
#define safe_halt() asm volatile ( "sti; hlt" : : : "memory" )
/* used when interrupts are already enabled or to shut down the processor */
#define halt() asm volatile ( "hlt" : : : "memory" )

#define local_save_flags(x) \
({ \
    BUILD_BUG_ON(sizeof(x) != sizeof(long)); \
    asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
})
#define local_irq_save(x) \
({ \
    local_save_flags(x); \
    local_irq_disable(); \
})
#define local_irq_restore(x) \
({ \
    BUILD_BUG_ON(sizeof(x) != sizeof(long)); \
    asm volatile ( "pushfq\n\t" \
                   "andq %0, (%%rsp)\n\t" \
                   "orq %1, (%%rsp)\n\t" \
                   "popfq" \
                   : : "i?r" ( ~X86_EFLAGS_IF ), \
                       "ri" ( (x) & X86_EFLAGS_IF ) ); \
})

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    local_save_flags(flags);
    return !!(flags & X86_EFLAGS_IF);
}
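
/*
 * Illustrative sketch: example_irq_safe_update() is a hypothetical helper
 * showing the usual local_irq_save()/local_irq_restore() pattern; the
 * section in between cannot be interrupted on the local CPU, and nesting
 * is safe because the caller's original EFLAGS.IF state is restored.
 */
static inline void example_irq_safe_update(unsigned long *val,
                                           unsigned long delta)
{
    unsigned long flags;

    local_irq_save(flags);     /* disable interrupts, remembering prior state */
    *val += delta;             /* no interrupt handler can run here */
    local_irq_restore(flags);  /* re-enable only if they were enabled before */
}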

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

void trap_init(void);
void init_idt_traps(void);
void load_system_tables(void);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);

#endif