/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999,2013
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first. Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 * |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bits 0-63 in the bit number field need to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * XOR with 0x3f.
 *
 */
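
/*
 * For illustration: within one 64-bit word, MSB0 bit number n maps to
 * LSB0 bit number n ^ 0x3f, e.g. MSB0 bit 0 is LSB0 bit 63 and MSB0
 * bit 5 is LSB0 bit 58 (5 ^ 0x3f == 58). The word index itself is the
 * same in both encodings.
 */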

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned long __bitops_mask(unsigned long nr)
{
	return 1UL << (nr & (BITS_PER_LONG - 1));
}
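
/*
 * Worked example (illustrative): for nr == 70 and BITS_PER_LONG == 64,
 * __bitops_word() masks off the low six bits (70 ^ (70 & 63) == 64) and
 * shifts right by 3, yielding a byte offset of 8, i.e. the second word
 * of the bitmap; __bitops_mask(70) is 1UL << 6.
 */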

static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_or(mask, (long *)addr);
}

static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_and(~mask, (long *)addr);
}

static __always_inline void arch_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_xor(mask, (long *)addr);
}

static inline bool arch_test_and_set_bit(unsigned long nr,
					 volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_or_barrier(mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_clear_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_and_barrier(~mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_xor_barrier(mask, (long *)addr);
	return old & mask;
}
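
/*
 * Note: the arch_test_and_*() primitives above use the _barrier forms of
 * the atomic ops, since value-returning atomics must be fully ordered,
 * while the arch___*() variants below are non-atomic and may only be
 * used when the bitmap cannot be modified concurrently.
 */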

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p |= mask;
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p &= ~mask;
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);

	*p ^= mask;
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p |= mask;
	return old & mask;
}

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p &= ~mask;
	return old & mask;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *p = __bitops_word(nr, addr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *p;
	*p ^= mask;
	return old & mask;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

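/*
 * Fast path: if the bit is already set, the atomic RMW can be skipped,
 * since acquiring the lock is going to fail anyway; otherwise fall
 * through to the fully ordered atomic test-and-set.
 */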
static inline bool arch_test_and_set_bit_lock(unsigned long nr,
					      volatile unsigned long *ptr)
{
	if (arch_test_bit(nr, ptr))
		return true;
	return arch_test_and_set_bit(nr, ptr);
}

static inline void arch_clear_bit_unlock(unsigned long nr,
					 volatile unsigned long *ptr)
{
	smp_mb__before_atomic();
	arch_clear_bit(nr, ptr);
}

static inline void arch___clear_bit_unlock(unsigned long nr,
					   volatile unsigned long *ptr)
{
	smp_mb();
	arch___clear_bit(nr, ptr);
}

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
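
/*
 * The instrumented-* headers wrap the arch_*() primitives above into the
 * generic set_bit()/clear_bit()/etc. interfaces, adding sanitizer
 * instrumentation (KASAN/KCSAN) around each access.
 */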

/*
 * Functions which use MSB0 bit numbering.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset);

#define for_each_set_bit_inv(bit, addr, size)				\
	for ((bit) = find_first_bit_inv((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_inv((addr), (size), (bit) + 1))
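
/*
 * Usage sketch (illustrative only, not part of this header):
 *
 *	unsigned long bitmap[__BITOPS_WORDS(256)];	// 256-bit MSB0 bitmap
 *	unsigned long bit;
 *
 *	for_each_set_bit_inv(bit, bitmap, 256)
 *		pr_info("MSB0 bit %lu is set\n", bit);
 */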

static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_and_clear_bit_inv(unsigned long nr,
					  volatile unsigned long *ptr)
{
	return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_bit_inv(unsigned long nr,
				const volatile unsigned long *ptr)
{
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

/**
 * __flogr - find leftmost one
 * @word: The word to search
 *
 * Returns the bit number of the most significant bit set,
 * where the most significant bit has bit number 0.
 * If no bit is set this function returns 64.
 */
static inline unsigned char __flogr(unsigned long word)
{
	if (__builtin_constant_p(word)) {
		unsigned long bit = 0;

		if (!word)
			return 64;
		/* binary search for the leftmost set bit, folded at compile time */
		if (!(word & 0xffffffff00000000UL)) {
			word <<= 32;
			bit += 32;
		}
		if (!(word & 0xffff000000000000UL)) {
			word <<= 16;
			bit += 16;
		}
		if (!(word & 0xff00000000000000UL)) {
			word <<= 8;
			bit += 8;
		}
		if (!(word & 0xf000000000000000UL)) {
			word <<= 4;
			bit += 4;
		}
		if (!(word & 0xc000000000000000UL)) {
			word <<= 2;
			bit += 2;
		}
		if (!(word & 0x8000000000000000UL)) {
			word <<= 1;
			bit += 1;
		}
		return bit;
	} else {
		union register_pair rp;

		rp.even = word;
		asm volatile(
			"	flogr	%[rp],%[rp]\n"
			: [rp] "+d" (rp.pair) : : "cc");
		return rp.even;
	}
}
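
/*
 * For illustration: __flogr(1UL << 63) == 0, __flogr(1) == 63 and
 * __flogr(0) == 64, matching the FLOGR (find leftmost one) instruction,
 * which counts bit positions from the MSB.
 */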

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* -word & word isolates the least significant set bit */
	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
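
/*
 * Worked example (illustrative): for word == 0x50, -word & word isolates
 * the lowest set bit, 0x10; __flogr(0x10) == 59, and 59 ^ 63 == 4, the
 * LSB0 number of that bit.
 */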

/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
static inline int ffs(int word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;
	unsigned int val = (unsigned int)word;

	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
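
/*
 * The final "& mask" folds the zero case: for word == 0, __flogr() returns
 * 64, 64 ^ 63 == 127, plus 1 gives 128, and 128 & 127 == 0, so ffs(0) == 0
 * without a branch.
 */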

/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	return __flogr(word) ^ (BITS_PER_LONG - 1);
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @word: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
static inline int fls64(unsigned long word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;

	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
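
/*
 * For illustration: fls64(0) == 0 (same masking trick as in ffs() above),
 * fls64(1) == 1 and fls64(1UL << 63) == 64.
 */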

/**
 * fls - find last (most-significant) bit set
 * @word: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int word)
{
	return fls64(word);
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */