/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }
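/*
 * Usage sketch (illustrative only, not part of this header; "stats" is a
 * hypothetical counter). Callers normally go through the generic atomic64_*
 * wrappers rather than the arch_* helpers declared below:
 *
 *	static atomic64_t stats = ATOMIC64_INIT(0);
 *
 *	arch_atomic64_set(&stats, 42);
 *	pr_info("stats=%lld\n", arch_atomic64_read(&stats));
 */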

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %P[func]" \
		     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
	ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
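/*
 * Sketch of the dispatch above (descriptive note, assuming
 * !CONFIG_X86_CMPXCHG64): a use such as
 *
 *	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
 *
 * initially emits a call to atomic64_read_386 that is patched at boot into
 * a call to atomic64_read_cx8 on CPUs with X86_FEATURE_CX8 (the CMPXCHG8B
 * instruction). With CONFIG_X86_CMPXCHG64 the _cx8 variant is called
 * unconditionally.
 */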

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

/**
 * arch_atomic64_cmpxchg - cmpxchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @o: expected value
 * @n: new value
 *
 * Atomically sets @v to @n if it was equal to @o and returns
 * the old value.
 */

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	return arch_cmpxchg64(&v->counter, o, n);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
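/*
 * A minimal sketch of the usual cmpxchg retry loop (illustrative;
 * "atomic64_add_clamped" and "max" are hypothetical, not a kernel API):
 *
 *	static s64 atomic64_add_clamped(atomic64_t *v, s64 i, s64 max)
 *	{
 *		s64 old, new, c = arch_atomic64_read(v);
 *
 *		do {
 *			old = c;
 *			new = old + i <= max ? old + i : max;
 *		} while ((c = arch_atomic64_cmpxchg(v, old, new)) != old);
 *
 *		return new;
 *	}
 */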

/**
 * arch_atomic64_xchg - xchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @n: value to assign
 *
 * Atomically xchgs the value of @v to @n and returns
 * the old value.
 */
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg
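/*
 * Example (illustrative; reuses the hypothetical "stats" counter from
 * above): atomically take the accumulated value while resetting it, e.g.
 * when draining per-period statistics.
 *
 *	s64 drained = arch_atomic64_xchg(&stats, 0);
 */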

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: value to assign
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v and returns it.
 */
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}
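/*
 * Note (descriptive): on 32-bit x86 a plain 64-bit load can be torn into
 * two 32-bit loads, so even a read must go through the cmpxchg8b-based
 * helper. The "=&A" constraint returns the 64-bit value in the edx:eax
 * register pair.
 */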

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + *@v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
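/*
 * Example (illustrative; "seq" is a hypothetical counter): the *_return
 * variants return the value *after* the operation, which makes them
 * suitable for handing out monotonically increasing IDs.
 *
 *	s64 id = arch_atomic64_add_return(1, &seq);
 */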

/*
 * Other variants with different arithmetic operators:
 */
static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless
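/*
 * Example (illustrative; "counter" is hypothetical): a crude saturation
 * guard that stops counting by one once the counter reaches S64_MAX.
 *
 *	arch_atomic64_add_unless(&counter, 1, S64_MAX);
 */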

static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
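/*
 * Example (illustrative; "obj->refs" is a hypothetical field): the classic
 * use is taking a reference only while an object is still alive, i.e. while
 * its refcount has not already dropped to zero.
 *
 *	if (!arch_atomic64_inc_not_zero(&obj->refs))
 *		return NULL;
 */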

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
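/*
 * Example (illustrative; "budget" is hypothetical): consume one unit of a
 * budget. The helper returns the old value minus one even when it does not
 * decrement, so a negative result means the budget was already exhausted.
 *
 *	if (arch_atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;
 */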

#undef alternative_atomic64
#undef __alternative_atomic64

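/*
 * The fallbacks below all follow the same pattern (descriptive note): take
 * a snapshot, compute the new value, and retry the cmpxchg until no other
 * CPU modified the counter in between, i.e. until the returned old value
 * matches the snapshot.
 */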
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
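/*
 * Example (illustrative; reuses the hypothetical "stats" counter): the
 * fetch_* variants return the value *before* the operation, unlike the
 * *_return variants above, which return the value after it.
 *
 *	s64 old = arch_atomic64_fetch_add(n, &stats);
 */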

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */