1 #ifndef _LINUX_BITOPS_H
2 #define _LINUX_BITOPS_H
3 #include <asm/types.h>
4
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK(30, 21) gives us the 32bit vector 0x7fe00000.
 * Requires h >= l and h < BITS_PER_LONG; the shifts are undefined otherwise.
 */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* As GENMASK(), but producing an unsigned long long (BITS_PER_LLONG-bit) mask. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LLONG - 1 - (h))))
15
/*
 * generic_ffs - find first (least significant) set bit.
 *
 * Defined the same way as the libc and compiler builtin ffs routines:
 * returns the 1-based position of the lowest set bit, or 0 when @x is 0
 * (man ffs). This therefore differs in spirit from find-first-zero (ffz).
 *
 * Work on an unsigned copy of @x: right-shifting a negative signed int
 * is implementation-defined in C, whereas unsigned shifts are fully
 * defined and give the intended result for inputs like INT_MIN.
 */
static inline int generic_ffs(int x)
{
	unsigned int v = x;
	int r = 1;

	if (!v)
		return 0;
	if (!(v & 0xffffu)) {
		v >>= 16;
		r += 16;
	}
	if (!(v & 0xffu)) {
		v >>= 8;
		r += 8;
	}
	if (!(v & 0xfu)) {
		v >>= 4;
		r += 4;
	}
	if (!(v & 3u)) {
		v >>= 2;
		r += 2;
	}
	if (!(v & 1u)) {
		v >>= 1;
		r += 1;
	}
	return r;
}
50
/*
 * generic_fls - find last (most significant) set bit.
 *
 * Returns the 1-based position of the highest set bit, or 0 when @x is 0.
 *
 * Work on an unsigned copy of @x: the narrowing left shifts below can
 * move a bit into (or past) the sign bit — e.g. input 1 reaches the
 * final "<<= 1" and produces 0x80000000 — which is undefined behaviour
 * on a signed int but fully defined on unsigned.
 */
static __inline__ int generic_fls(int x)
{
	unsigned int v = x;
	int r = 32;

	if (!v)
		return 0;
	if (!(v & 0xffff0000u)) {
		v <<= 16;
		r -= 16;
	}
	if (!(v & 0xff000000u)) {
		v <<= 8;
		r -= 8;
	}
	if (!(v & 0xf0000000u)) {
		v <<= 4;
		r -= 4;
	}
	if (!(v & 0xc0000000u)) {
		v <<= 2;
		r -= 2;
	}
	if (!(v & 0x80000000u)) {
		v <<= 1;
		r -= 1;
	}
	return r;
}
83
84 #if BITS_PER_LONG == 64
85
generic_ffsl(unsigned long x)86 static inline int generic_ffsl(unsigned long x)
87 {
88 return !x || (u32)x ? generic_ffs(x) : generic_ffs(x >> 32) + 32;
89 }
90
generic_flsl(unsigned long x)91 static inline int generic_flsl(unsigned long x)
92 {
93 u32 h = x >> 32;
94
95 return h ? generic_fls(h) + 32 : generic_fls(x);
96 }
97
98 #else
99 # define generic_ffsl generic_ffs
100 # define generic_flsl generic_fls
101 #endif
102
103 /*
104 * Include this here because some architectures need generic_ffs/fls in
105 * scope
106 */
107 #include <asm/bitops.h>
108
109 #if BITS_PER_LONG == 64
110 # define fls64 flsl
111 # define ffs64 ffsl
112 #else
113 # ifndef ffs64
/* ffs over a 64-bit value on a 32-bit platform, built from 32-bit ffs(). */
static inline int generic_ffs64(__u64 x)
{
	if ((__u32)x || !x)
		return ffs(x);
	return ffs(x >> 32) + 32;
}
118 # define ffs64 generic_ffs64
119 # endif
120 # ifndef fls64
/* fls over a 64-bit value on a 32-bit platform, built from 32-bit fls(). */
static inline int generic_fls64(__u64 x)
{
	__u32 hi = x >> 32;

	if (hi)
		return fls(hi) + 32;
	return fls(x);
}
127 # define fls64 generic_fls64
128 # endif
129 #endif
130
/* Number of bits needed to represent values below @count's top bit, i.e. fls(). */
static __inline__ int get_bitmask_order(unsigned int count)
{
	/* We could be slightly more clever with -1 here... */
	return fls(count);
}
138
/* ceil(log2(count)): fls(count)-1, bumped by one when @count is not a power of 2. */
static __inline__ int get_count_order(unsigned int count)
{
	int order = fls(count) - 1;

	if (count & (count - 1))
		order++;
	return order;
}
148
149 /*
150 * hweightN: returns the hamming weight (i.e. the number
151 * of bits set) of a N-bit word
152 */
153
/*
 * generic_hweight32 - Hamming weight (number of set bits) of a 32-bit word.
 * Classic parallel bit-count: fold pairs, nibbles and bytes, then sum
 * the byte counts; the final mask keeps the 0..32 total.
 */
static inline unsigned int generic_hweight32(unsigned int w)
{
	unsigned int n = w;

	n -= (n >> 1) & 0x55555555;                     /* per-pair counts */
	n = (n & 0x33333333) + ((n >> 2) & 0x33333333); /* per-nibble counts */
	n = (n + (n >> 4)) & 0x0F0F0F0F;                /* per-byte counts */
	n += n >> 8;
	n += n >> 16;
	return n & 0x3F;
}
162
/* generic_hweight16 - Hamming weight of the low 16 bits of @w. */
static inline unsigned int generic_hweight16(unsigned int w)
{
	unsigned int n = (w & 0x5555) + ((w >> 1) & 0x5555);

	n = (n & 0x3333) + ((n >> 2) & 0x3333);
	n = (n + (n >> 4)) & 0x0F0F;	/* nibble counts <= 4, so no carry */
	return (n + (n >> 8)) & 0x00FF;
}
170
/* generic_hweight8 - Hamming weight of the low 8 bits of @w. */
static inline unsigned int generic_hweight8(unsigned int w)
{
	unsigned int n = (w & 0x55) + ((w >> 1) & 0x55);

	n = (n & 0x33) + ((n >> 2) & 0x33);
	return (n + (n >> 4)) & 0x0F;	/* nibble counts <= 4, so no carry */
}
177
/* generic_hweight64 - Hamming weight of a 64-bit word. */
static inline unsigned long generic_hweight64(__u64 w)
{
#if BITS_PER_LONG < 64
	/* No 64-bit registers worth using: count each 32-bit half. */
	return generic_hweight32((unsigned int)(w >> 32)) +
	       generic_hweight32((unsigned int)w);
#else
	/* Parallel bit-count, folding pairs/nibbles/bytes then summing bytes. */
	u64 n = w - ((w >> 1) & 0x5555555555555555ul);

	n = (n & 0x3333333333333333ul) + ((n >> 2) & 0x3333333333333333ul);
	n = (n + (n >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	n += n >> 8;
	n += n >> 16;
	return (n + (n >> 32)) & 0xFFul;
#endif
}
193
/* hweight_long - Hamming weight of an unsigned long, whatever its width. */
static inline unsigned long hweight_long(unsigned long w)
{
	if (sizeof(w) == 4)
		return generic_hweight32(w);
	return generic_hweight64(w);
}
198
199 /*
200 * rol32 - rotate a 32-bit value left
201 *
202 * @word: value to rotate
203 * @shift: bits to roll
204 */
rol32(__u32 word,unsigned int shift)205 static inline __u32 rol32(__u32 word, unsigned int shift)
206 {
207 return (word << shift) | (word >> (32 - shift));
208 }
209
210 /*
211 * ror32 - rotate a 32-bit value right
212 *
213 * @word: value to rotate
214 * @shift: bits to roll
215 */
ror32(__u32 word,unsigned int shift)216 static inline __u32 ror32(__u32 word, unsigned int shift)
217 {
218 return (word >> shift) | (word << (32 - shift));
219 }
220
/*
 * base-2 logarithm, usable in constant expressions.
 * LOG_2(x) evaluates to floor(log2(x)) for non-zero x; LOG_2(0)
 * falls through every helper and evaluates to 0. Each __Ln helper
 * handles an n-bit-wide operand by testing its upper half.
 */
#define __L2(_x) (((_x) & 0x00000002) ? 1 : 0)
#define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
#define __L8(_x) (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
227
/**
 * for_each_set_bit - iterate over every set bit in a memory region
 * @bit: The integer iterator
 * @addr: The address to base the search on
 * @size: The maximum size to search
 *
 * Relies on find_first_bit()/find_next_bit() from <asm/bitops.h>;
 * when the loop terminates normally, @bit is left >= @size.
 */
#define for_each_set_bit(bit, addr, size) \
	for ( (bit) = find_first_bit(addr, size); \
	      (bit) < (size); \
	      (bit) = find_next_bit(addr, size, (bit) + 1) )
238
239 #endif
240