#ifndef __ARCH_X86_ATOMIC__
#define __ARCH_X86_ATOMIC__

#include <xen/atomic.h>
#include <asm/system.h>

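/*
 * build_read_atomic() generates a size-specific atomic load helper: a
 * single MOV from memory, which x86 guarantees to be atomic for naturally
 * aligned accesses of up to 8 bytes.
 */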
#define build_read_atomic(name, size, type, reg) \
static inline type name(const volatile type *addr) \
{ \
    type ret; \
    asm volatile ( "mov" size " %1,%0" : reg (ret) : "m" (*addr) ); \
    return ret; \
}

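/*
 * build_write_atomic() generates the matching atomic store helper, again
 * a single MOV to memory.
 */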
#define build_write_atomic(name, size, type, reg) \
static inline void name(volatile type *addr, type val) \
{ \
    asm volatile ( "mov" size " %1,%0" : "=m" (*addr) : reg (val) ); \
}

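/*
 * build_add_sized() generates a single ADD-to-memory instruction.  There
 * is no LOCK prefix, so the read-modify-write is not atomic with respect
 * to other CPUs; only the individual load and store are.
 */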
#define build_add_sized(name, size, type, reg) \
    static inline void name(volatile type *addr, type val)              \
    {                                                                   \
        asm volatile("add" size " %1,%0"                                \
                     : "=m" (*addr)                                     \
                     : reg (val));                                      \
    }

build_read_atomic(read_u8_atomic, "b", uint8_t, "=q")
build_read_atomic(read_u16_atomic, "w", uint16_t, "=r")
build_read_atomic(read_u32_atomic, "l", uint32_t, "=r")
build_read_atomic(read_u64_atomic, "q", uint64_t, "=r")

build_write_atomic(write_u8_atomic, "b", uint8_t, "q")
build_write_atomic(write_u16_atomic, "w", uint16_t, "r")
build_write_atomic(write_u32_atomic, "l", uint32_t, "r")
build_write_atomic(write_u64_atomic, "q", uint64_t, "r")

build_add_sized(add_u8_sized, "b", uint8_t, "qi")
build_add_sized(add_u16_sized, "w", uint16_t, "ri")
build_add_sized(add_u32_sized, "l", uint32_t, "ri")
build_add_sized(add_u64_sized, "q", uint64_t, "ri")

#undef build_read_atomic
#undef build_write_atomic
#undef build_add_sized

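/*
 * Deliberately never defined: if read_atomic(), write_atomic() or
 * add_sized() below is used on an object whose size is not 1, 2, 4 or 8
 * bytes, the call survives to link time and the build fails.
 */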
void __bad_atomic_size(void);

#define read_atomic(p) ({                                 \
    unsigned long x_;                                     \
    CLANG_DISABLE_WARN_GCC_COMPAT_START                   \
    switch ( sizeof(*(p)) ) {                             \
    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
    default: x_ = 0; __bad_atomic_size(); break;          \
    }                                                     \
    CLANG_DISABLE_WARN_GCC_COMPAT_END                     \
    (typeof(*(p)))x_;                                     \
})

#define write_atomic(p, x) ({                             \
    typeof(*(p)) __x = (x);                               \
    unsigned long x_ = (unsigned long)__x;                \
    switch ( sizeof(*(p)) ) {                             \
    case 1: write_u8_atomic((uint8_t *)(p), x_); break;   \
    case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
    case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
    case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
    default: __bad_atomic_size(); break;                  \
    }                                                     \
})

#define add_sized(p, x) ({                                \
    typeof(*(p)) x_ = (x);                                \
    switch ( sizeof(*(p)) )                               \
    {                                                     \
    case 1: add_u8_sized((uint8_t *)(p), x_); break;      \
    case 2: add_u16_sized((uint16_t *)(p), x_); break;    \
    case 4: add_u32_sized((uint32_t *)(p), x_); break;    \
    case 8: add_u64_sized((uint64_t *)(p), x_); break;    \
    default: __bad_atomic_size(); break;                  \
    }                                                     \
})
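
/*
 * Illustrative usage sketch (not part of the original header), assuming a
 * hypothetical naturally aligned shared counter:
 *
 *     static uint32_t shared_count;
 *
 *     uint32_t snap = read_atomic(&shared_count);   // atomic 32-bit load
 *     write_atomic(&shared_count, snap + 1);        // atomic 32-bit store
 *     add_sized(&shared_count, 1);                  // single ADD, no LOCK
 */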

static inline int atomic_read(const atomic_t *v)
{
    return read_atomic(&v->counter);
}

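/* Non-atomic read of a by-value copy of the atomic_t. */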
static inline int _atomic_read(atomic_t v)
{
    return v.counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
    write_atomic(&v->counter, i);
}

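/* Non-atomic set; only safe while no other CPU can access v concurrently. */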
static inline void _atomic_set(atomic_t *v, int i)
{
    v->counter = i;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
    return cmpxchg(&v->counter, old, new);
}

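/* The LOCK prefix makes the read-modify-write atomic with respect to other CPUs. */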
static inline void atomic_add(int i, atomic_t *v)
{
    asm volatile (
        "lock; addl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}

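/*
 * Returns the value of v after the addition; arch_fetch_and_add() yields
 * the value before it.
 */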
static inline int atomic_add_return(int i, atomic_t *v)
{
    return i + arch_fetch_and_add(&v->counter, i);
}

static inline void atomic_sub(int i, atomic_t *v)
{
    asm volatile (
        "lock; subl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
    return arch_fetch_and_add(&v->counter, -i) - i;
}

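/*
 * The *_and_test and *_negative helpers below use ASM_FLAG_OUT() so that
 * compilers with flag-output support read the flag directly ("=@ccz" /
 * "=@ccs"), while older compilers fall back to an explicit SETcc.
 */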
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
    bool c;

    asm volatile ( "lock; subl %[i], %[counter]\n\t"
                   ASM_FLAG_OUT(, "setz %[zf]\n\t")
                   : [counter] "+m" (*(volatile int *)&v->counter),
                     [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
                   : [i] "ir" (i) : "memory" );

    return c;
}

static inline void atomic_inc(atomic_t *v)
{
    asm volatile (
        "lock; incl %0"
        : "=m" (*(volatile int *)&v->counter)
        : "m" (*(volatile int *)&v->counter) );
}

static inline int atomic_inc_return(atomic_t *v)
{
    return atomic_add_return(1, v);
}

static inline int atomic_inc_and_test(atomic_t *v)
{
    bool c;

    asm volatile ( "lock; incl %[counter]\n\t"
                   ASM_FLAG_OUT(, "setz %[zf]\n\t")
                   : [counter] "+m" (*(volatile int *)&v->counter),
                     [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
                   :: "memory" );

    return c;
}

static inline void atomic_dec(atomic_t *v)
{
    asm volatile (
        "lock; decl %0"
        : "=m" (*(volatile int *)&v->counter)
        : "m" (*(volatile int *)&v->counter) );
}

static inline int atomic_dec_return(atomic_t *v)
{
    return atomic_sub_return(1, v);
}

static inline int atomic_dec_and_test(atomic_t *v)
{
    bool c;

    asm volatile ( "lock; decl %[counter]\n\t"
                   ASM_FLAG_OUT(, "setz %[zf]\n\t")
                   : [counter] "+m" (*(volatile int *)&v->counter),
                     [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
                   :: "memory" );

    return c;
}

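/* Atomically add i to v and return true iff the result is negative. */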
static inline int atomic_add_negative(int i, atomic_t *v)
{
    bool c;

    asm volatile ( "lock; addl %[i], %[counter]\n\t"
                   ASM_FLAG_OUT(, "sets %[sf]\n\t")
                   : [counter] "+m" (*(volatile int *)&v->counter),
                     [sf] ASM_FLAG_OUT("=@ccs", "=qm") (c)
                   : [i] "ir" (i) : "memory" );

    return c;
}

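/*
 * Atomically add a to v unless v equals u.  Returns the value of v
 * observed before any addition took place.
 */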
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
    int c, old;

    c = atomic_read(v);
    while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
        c = old;
    return c;
}

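/* Atomically exchange v->counter with new, returning the previous value. */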
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
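
/*
 * Illustrative usage sketch of the atomic_t helpers (not part of the
 * original header), assuming a hypothetical reference count and cleanup
 * routine:
 *
 *     static atomic_t refcnt = ATOMIC_INIT(1);   // ATOMIC_INIT() from <xen/atomic.h>
 *
 *     atomic_inc(&refcnt);
 *     ...
 *     if ( atomic_dec_and_test(&refcnt) )
 *         free_object();                         // hypothetical cleanup hook
 */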

#endif /* __ARCH_X86_ATOMIC__ */