/*
 * Based on arch/arm64/include/asm/atomic.h
 * which in turn is
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ARCH_ARM_ARM64_ATOMIC
#define __ARCH_ARM_ARM64_ATOMIC

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
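/*
 * Example usage (illustrative sketch only; assumes the ATOMIC_INIT()
 * initialiser and atomic_read() accessor provided by the enclosing
 * asm/atomic.h):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_add(1, &refcnt);			// take a reference
 *	if (atomic_sub_return(1, &refcnt) == 0)	// fully ordered decrement
 *		put_object();			// hypothetical release hook
 *
 * The void atomic_add()/atomic_sub() below imply no memory barriers;
 * the *_return and cmpxchg variants are fully ordered.
 */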
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

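/*
 * atomic_add_return() is fully ordered: the store-release from stlxr
 * together with the trailing smp_mb() gives full barrier semantics
 * around the update, and the new value is returned.
 */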
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

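/* atomic_sub()/atomic_sub_return() mirror the add variants above. */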
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

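/*
 * atomic_cmpxchg() atomically replaces *ptr with 'new' if it currently
 * holds 'old', and returns the value observed; the exchange happened
 * iff the return value equals 'old'.  The smp_mb() calls before and
 * after make it a full barrier whether or not the exchange succeeds.
 */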
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

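/*
 * __atomic_add_unless() adds 'a' to *v unless *v is 'u', using a
 * cmpxchg loop, and returns the value previously held; the add was
 * skipped iff the return value equals 'u'.
 */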
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 8
 * indent-tabs-mode: t
 * End:
 */