1 #ifndef __ARCH_ARM_ATOMIC__
2 #define __ARCH_ARM_ATOMIC__
3 
4 #include <xen/atomic.h>
5 #include <xen/prefetch.h>
6 #include <asm/system.h>
7 
/*
 * build_atomic_read(name, size, width, type, reg)
 *
 * Expand to a static inline function 'name' that loads '*addr' with a
 * single ldr instruction (hence single-copy atomic for aligned accesses).
 *  - size:  instruction suffix selecting the access width ("b", "h" or "").
 *  - width: register-width operand modifier spliced into the template
 *           (defined below: "" on arm32, "w" on arm64 for <64-bit accesses).
 *  - reg:   GCC output constraint for the result (e.g. "=r").
 */
#define build_atomic_read(name, size, width, type, reg)\
static inline type name(const volatile type *addr) \
{                                                  \
    type ret;                                      \
    asm volatile("ldr" size " %" width "0,%1"      \
                 : reg (ret)                       \
                 : "m" (*(volatile type *)addr));  \
    return ret;                                    \
}
17 
/*
 * build_atomic_write(name, size, width, type, reg)
 *
 * Expand to a static inline function 'name' that stores 'val' to '*addr'
 * with a single str instruction (single-copy atomic for aligned accesses).
 * Parameters as for build_atomic_read(), except 'reg' is the input
 * constraint for the value to store (e.g. "r").
 */
#define build_atomic_write(name, size, width, type, reg) \
static inline void name(volatile type *addr, type val) \
{                                                      \
    asm volatile("str" size " %"width"1,%0"            \
                 : "=m" (*(volatile type *)addr)       \
                 : reg (val));                         \
}
25 
/*
 * build_add_sized(name, size, width, type, reg)
 *
 * Expand to a static inline function 'name' that adds 'val' to '*addr'
 * using single-copy-atomic loads/stores of the given width.  Note the
 * ldr/add/str sequence as a whole is NOT atomic; only the individual
 * memory accesses are.
 *
 * "+m":  the asm both reads and writes *addr, so it must be a read-write
 *        operand; an output-only "=m" would tell the compiler the prior
 *        value of *addr is dead.
 * "=&r": 't' is written by the ldr before the input 'val' is consumed by
 *        the add, so it needs the early-clobber modifier to stop the
 *        compiler from allocating 'val' and 't' to the same register.
 */
#define build_add_sized(name, size, width, type, reg) \
static inline void name(volatile type *addr, type val)                  \
{                                                                       \
    type t;                                                             \
    asm volatile("ldr" size " %"width"1,%0\n"                           \
                 "add %"width"1,%"width"1,%"width"2\n"                  \
                 "str" size " %"width"1,%0"                             \
                 : "+m" (*(volatile type *)addr), "=&r" (t)             \
                 : reg (val));                                          \
}
36 
/*
 * Register-width operand modifiers for the asm templates above: arm64 must
 * use the 32-bit "w" register names for 8/16/32-bit accesses, while arm32
 * needs no modifier.
 */
#if defined (CONFIG_ARM_32)
#define BYTE ""
#define WORD ""
#elif defined (CONFIG_ARM_64)
#define BYTE "w"
#define WORD "w"
#endif
44 
/* Single-copy-atomic readers for 8/16/32-bit quantities and plain int. */
build_atomic_read(read_u8_atomic,  "b", BYTE, uint8_t, "=r")
build_atomic_read(read_u16_atomic, "h", WORD, uint16_t, "=r")
build_atomic_read(read_u32_atomic, "",  WORD, uint32_t, "=r")
build_atomic_read(read_int_atomic, "",  WORD, int, "=r")

/* Matching single-copy-atomic writers. */
build_atomic_write(write_u8_atomic,  "b", BYTE, uint8_t, "r")
build_atomic_write(write_u16_atomic, "h", WORD, uint16_t, "r")
build_atomic_write(write_u32_atomic, "",  WORD, uint32_t, "r")
build_atomic_write(write_int_atomic, "",  WORD, int, "r")

/* 64-bit accesses are only single-copy atomic on 64-bit ARM. */
#if defined (CONFIG_ARM_64)
build_atomic_read(read_u64_atomic, "", "", uint64_t, "=r")
build_atomic_write(write_u64_atomic, "", "", uint64_t, "r")
#endif

/* Sized, non-atomic read-modify-write adders ("ri": register or immediate). */
build_add_sized(add_u8_sized, "b", BYTE, uint8_t, "ri")
build_add_sized(add_u16_sized, "h", WORD, uint16_t, "ri")
build_add_sized(add_u32_sized, "", WORD, uint32_t, "ri")
63 
/*
 * Deliberately never defined: referenced only from the (unreachable when
 * sizes are valid) default cases below, so that an access of an unsupported
 * size fails at link time rather than silently misbehaving.
 */
void __bad_atomic_size(void);
65 
/*
 * Single-copy-atomic read of the 1-, 2- or 4-byte object at 'p'; other
 * sizes fail to link via __bad_atomic_size().  The macro parameter 'p'
 * is parenthesised at every use so that expressions such as
 * read_atomic(cond ? a : b) expand correctly (matching add_sized()).
 */
#define read_atomic(p) ({                                               \
    typeof(*(p)) __x;                                                   \
    switch ( sizeof(*(p)) ) {                                           \
    case 1: __x = (typeof(*(p)))read_u8_atomic((uint8_t *)(p)); break;  \
    case 2: __x = (typeof(*(p)))read_u16_atomic((uint16_t *)(p)); break;\
    case 4: __x = (typeof(*(p)))read_u32_atomic((uint32_t *)(p)); break;\
    default: __x = 0; __bad_atomic_size(); break;                       \
    }                                                                   \
    __x;                                                                \
})
76 
/*
 * Single-copy-atomic write of 'x' to the 1-, 2- or 4-byte object at 'p';
 * evaluates to the value written.  'p' is parenthesised at every use so
 * non-trivial pointer expressions expand correctly (matching add_sized()).
 */
#define write_atomic(p, x) ({                                           \
    typeof(*(p)) __x = (x);                                             \
    switch ( sizeof(*(p)) ) {                                           \
    case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)__x); break;       \
    case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)__x); break;    \
    case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)__x); break;    \
    default: __bad_atomic_size(); break;                                \
    }                                                                   \
    __x;                                                                \
})
87 
/*
 * Add 'x' to the 1-, 2- or 4-byte object at 'p' using single-copy-atomic
 * loads and stores.  NOTE: the read-modify-write sequence as a whole is
 * not atomic -- only each individual memory access is.
 */
#define add_sized(p, x) ({                                              \
    typeof(*(p)) __x = (x);                                             \
    switch ( sizeof(*(p)) )                                             \
    {                                                                   \
    case 1: add_u8_sized((uint8_t *)(p), __x); break;                   \
    case 2: add_u16_sized((uint16_t *)(p), __x); break;                 \
    case 4: add_u32_sized((uint32_t *)(p), __x); break;                 \
    default: __bad_atomic_size(); break;                                \
    }                                                                   \
})
98 
99 /*
100  * On ARM, ordinary assignment (str instruction) doesn't clear the local
101  * strex/ldrex monitor on some implementations. The reason we can use it for
102  * atomic_set() is the clrex or dummy strex done on every exception return.
103  */
atomic_read(const atomic_t * v)104 static inline int atomic_read(const atomic_t *v)
105 {
106     return *(volatile int *)&v->counter;
107 }
108 
/* Plain (non-atomic) read of a by-value atomic_t's counter. */
static inline int _atomic_read(atomic_t v)
{
    int val = v.counter;

    return val;
}
113 
/*
 * Set the counter to 'i'.  A plain aligned-int str is single-copy atomic
 * on ARM; see the note above for why the exclusive monitor is not a
 * concern here.
 */
static inline void atomic_set(atomic_t *v, int i)
{
    v->counter = i;
}
118 
/* Plain (non-atomic) initialisation of the counter to 'i'. */
static inline void _atomic_set(atomic_t *v, int i)
{
    v->counter = i;
}
123 
124 #if defined(CONFIG_ARM_32)
125 # include <asm/arm32/atomic.h>
126 #elif defined(CONFIG_ARM_64)
127 # include <asm/arm64/atomic.h>
128 #else
129 # error "unknown ARM variant"
130 #endif
131 
/* Atomically subtract 'i' from *v; return true iff the result is zero. */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
    int remaining = atomic_sub_return(i, v);

    return remaining == 0;
}
136 
/* Atomically increment *v by one. */
static inline void atomic_inc(atomic_t *v)
{
    atomic_add(1, v);
}
141 
/* Atomically increment *v by one and return the new value. */
static inline int atomic_inc_return(atomic_t *v)
{
    int new_val = atomic_add_return(1, v);

    return new_val;
}
146 
/* Atomically increment *v by one; return true iff the result is zero. */
static inline int atomic_inc_and_test(atomic_t *v)
{
    return !atomic_add_return(1, v);
}
151 
/* Atomically decrement *v by one. */
static inline void atomic_dec(atomic_t *v)
{
    atomic_sub(1, v);
}
156 
/* Atomically decrement *v by one and return the new value. */
static inline int atomic_dec_return(atomic_t *v)
{
    int new_val = atomic_sub_return(1, v);

    return new_val;
}
161 
/* Atomically decrement *v by one; return true iff the result is zero. */
static inline int atomic_dec_and_test(atomic_t *v)
{
    return !atomic_sub_return(1, v);
}
166 
/* Atomically add 'i' to *v; return true iff the result is negative. */
static inline int atomic_add_negative(int i, atomic_t *v)
{
    int sum = atomic_add_return(i, v);

    return sum < 0;
}
171 
/*
 * Atomically add 'a' to *v unless its current value is 'u'; forwards to the
 * per-arch __atomic_add_unless() and returns its result (the old value).
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
    return __atomic_add_unless(v, a, u);
}
176 
/*
 * Atomically exchange the counter of 'v' with 'new', returning the old
 * value.  Both macro parameters are parenthesised at their point of use
 * for expansion hygiene.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
178 
179 #endif /* __ARCH_ARM_ATOMIC__ */
180 /*
181  * Local variables:
182  * mode: C
183  * c-file-style: "BSD"
184  * c-basic-offset: 4
185  * indent-tabs-mode: nil
186  * End:
187  */
188