/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ARCH_ARM_ARM32_ATOMIC__
#define __ARCH_ARM_ARM32_ATOMIC__

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
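/*
 * Atomically add i to v->counter. No return value and no memory
 * barrier semantics.
 */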
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

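/*
 * Atomically add i to v->counter and return the new value. Acts as a
 * full memory barrier (smp_mb() before and after the update).
 */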
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

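/*
 * Atomically subtract i from v->counter. No return value and no memory
 * barrier semantics.
 */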
static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

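/*
 * Atomically subtract i from v->counter and return the new value. Acts
 * as a full memory barrier.
 */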
static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

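/*
 * If ptr->counter equals old, atomically replace it with new. Returns
 * the value that was observed, i.e. old on success. The loop retries
 * only when the store-exclusive fails spuriously; a genuine mismatch
 * exits immediately with res == 0.
 */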
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        smp_mb();
        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

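/*
 * Atomically add a to v->counter unless the counter currently equals u.
 * Returns the old value; the trailing barrier is only issued when an
 * update actually took place.
 */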
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1:     ldrex   %0, [%4]\n"
"       teq     %0, %5\n"
"       beq     2f\n"
"       add     %1, %0, %6\n"
"       strex   %2, %1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}

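/*
 * Illustrative sketch (not part of this interface): a hypothetical caller
 * could use __atomic_add_unless() to take a reference only while the
 * count is still non-zero; an old value of 0 means the object was
 * already dead:
 *
 *      static inline bool hypothetical_get_ref(atomic_t *refcnt)
 *      {
 *              return __atomic_add_unless(refcnt, 1, 0) != 0;
 *      }
 */
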
#endif /* __ARCH_ARM_ARM32_ATOMIC__ */
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 8
 * indent-tabs-mode: t
 * End:
 */