#ifndef _MICROBLAZE_BITOPS_H
#define _MICROBLAZE_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

#ifdef __KERNEL__
/*
 * The __ functions are not atomic.
 */

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
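
/*
 * Illustrative example (not part of the original header): ffz() walks the
 * word from bit 0 upward and returns the index of the first clear bit.
 * For instance, ffz(0x0000000f) skips the four set low bits and returns 4,
 * while ffz(0) returns 0 because bit 0 is already clear. Passing ~0UL is
 * undefined, as the comment above notes.
 */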

static inline void set_bit(int nr, volatile void *addr)
{
	int *a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	*a |= mask;
	restore_flags(flags);
}
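
/*
 * Worked example of the indexing above (illustrative, not in the original
 * source): for nr = 37 the word offset is 37 >> 5 = 1 and the bit within
 * that word is 37 & 0x1f = 5, so set_bit(37, addr) ORs ((int *)addr)[1]
 * with the mask 1 << 5 = 0x20 while interrupts are disabled.
 */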

static inline void __set_bit(int nr, volatile void *addr)
{
	int *a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
#define PLATFORM__SET_BIT

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void clear_bit(int nr, volatile void *addr)
{
	int *a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	*a &= ~mask;
	restore_flags(flags);
}

#define __clear_bit(nr, addr)	clear_bit(nr, addr)
#define PLATFORM__CLEAR_BIT

static inline void change_bit(int nr, volatile void *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	save_flags_cli(flags);
	*ADDR ^= mask;
	restore_flags(flags);
}

static inline void __change_bit(int nr, volatile void *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	restore_flags(flags);

	return retval;
}
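
/*
 * Minimal usage sketch (hypothetical caller, not part of this header):
 * test_and_set_bit() returns the bit's previous value, so a caller can
 * claim a flag exactly once, e.g.
 *
 *	static unsigned long driver_flags;	(assumed example bitmap)
 *
 *	if (!test_and_set_bit(0, &driver_flags))
 *		setup();	(bit 0 was clear before; this caller owns it)
 *
 * The atomic variants disable interrupts around the read-modify-write;
 * the __ variants do not and are only safe under an external lock.
 */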

static inline int __test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	restore_flags(flags);

	return retval;
}

static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	restore_flags(flags);

	return retval;
}

static inline int __test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int __constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static inline int __test_bit(int nr, volatile void *addr)
{
	int *a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr, addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr), (addr)) : \
	 __test_bit((nr), (addr)))
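
/*
 * Note (added for clarity, not in the original header): when nr is a
 * compile-time constant, __builtin_constant_p(nr) is true and test_bit()
 * expands to __constant_test_bit(), which the compiler can fold into a
 * single masked load; otherwise __test_bit() computes the word index and
 * mask at run time. Neither variant is atomic.
 */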

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static inline int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL >> size;
found_middle:
	return result + ffz(tmp);
}
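
/*
 * Worked example (illustrative only): with a bitmap whose first word is
 * ~0UL and whose second word is 0x000000ff, find_first_zero_bit(addr, 64)
 * skips the fully set first word and returns 32 + ffz(0x000000ff) =
 * 32 + 8 = 40. If no zero bit exists below 'size', the value returned is
 * at least 'size', which callers must treat as "not found".
 */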

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
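
/*
 * Example (illustrative only): hweight8(0xb1) counts the set bits of
 * 1011 0001 and yields 4; hweight32(0) yields 0 and hweight32(~0UL)
 * yields 32. These simply map onto the generic kernel helpers, so no
 * MicroBlaze-specific code is involved.
 */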

static inline int ext2_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_flags_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static inline int ext2_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_flags_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static inline int ext2_test_bit(int nr, const volatile void *addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}
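
/*
 * Worked example (illustrative only): the ext2_* helpers index the bitmap
 * byte by byte, so for nr = 13 the byte offset is 13 >> 3 = 1 and the mask
 * is 1 << (13 & 0x07) = 1 << 5 = 0x20. Because the addressing is per byte,
 * the on-disk little-endian ext2 bitmap layout is preserved regardless of
 * the CPU's word endianness.
 */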

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(void *addr,
				unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little-endian value in tmp, but then the
		 * shift would be illegal. So we could keep a big-endian
		 * value in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32 - offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little-endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
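
/*
 * Worked example of the mask trick above (illustrative only): for
 * offset = 8, the straight mask ~0UL >> (32 - 8) is 0x000000ff; applying
 * __swab32() gives 0xff000000. On a big-endian CPU the bitmap's bits 0-7
 * (stored in the lowest-addressed byte) land in the top byte of the loaded
 * word, so the swapped mask marks exactly those already-scanned bits as
 * "seen" while tmp stays in native load order. tmp itself is byte-swapped
 * only once, at the end, before ffz().
 */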

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr, addr)	test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr)			set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr)	test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr)		test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size)	find_first_zero_bit(addr, size)

#endif /* __KERNEL__ */

#endif /* _MICROBLAZE_BITOPS_H */