/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ARCH_ARM_ARM32_ATOMIC__
#define __ARCH_ARM_ARM32_ATOMIC__

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
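/*
 * Every operation below follows the same retry pattern; roughly, in
 * C-like pseudocode (a sketch for illustration only -- load_exclusive,
 * store_exclusive and op are made-up names, not real functions):
 *
 *	do {
 *		old = load_exclusive(&v->counter);	// LDREX: load, mark exclusive
 *		new = op(old, i);			// e.g. add, sub, and
 *		failed = store_exclusive(new, &v->counter); // STREX: 0 on success
 *	} while (failed);			// retry if we lost the reservation
 *
 * STREX writes a non-zero status if another CPU broke the exclusive
 * reservation between the LDREX and the STREX, so the loop re-reads
 * and retries until the update lands atomically.
 */

/* Atomically add i to v->counter; no return value, no memory barriers. */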
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

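/*
 * Atomically add i to v->counter and return the new value.
 * Fully ordered: smp_mb() both before and after the update.
 */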
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

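/* Atomically subtract i from v->counter; no return value, no barriers. */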
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

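/*
 * Atomically subtract i from v->counter and return the new value.
 * Fully ordered, like atomic_add_return() above.
 */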
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

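/* Atomically AND v->counter with m; no return value, no barriers. */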
static inline void atomic_and(int m, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_and\n"
"1:	ldrex	%0, [%3]\n"
"	and	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (m)
	: "cc");
}

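/*
 * Atomically compare v->counter with old and, if they match, store new.
 * Returns the value observed before the store attempt, so the caller can
 * tell success (return value == old) from failure.  STREXEQ only executes
 * when the comparison matched; the loop retries only on a lost exclusive
 * reservation, never on a value mismatch.  Fully ordered.
 */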
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&v->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (v->counter)
		    : "r" (&v->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

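/*
 * Atomically add a to v->counter unless the counter equals u.
 * Returns the old value in either case; the trailing barrier is
 * skipped when no update was performed.
 */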
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}

#endif /* __ARCH_ARM_ARM32_ATOMIC__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 8
 * indent-tabs-mode: t
 * End:
 */