1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3 * Copyright (c) 2014, STMicroelectronics International N.V.
4 */
5 #ifndef UTIL_H
6 #define UTIL_H
7
8 #include <compiler.h>
9 #include <inttypes.h>
10
11 #ifndef __ASSEMBLER__
12 #include <assert.h>
13 #include <stddef.h>
14 #endif
15
/* Common power-of-two region sizes as pointer-width unsigned constants */
#define SIZE_4K	UINTPTR_C(0x1000)
#define SIZE_1M	UINTPTR_C(0x100000)
#define SIZE_2M	UINTPTR_C(0x200000)
#define SIZE_4M	UINTPTR_C(0x400000)
#define SIZE_8M	UINTPTR_C(0x800000)
#define SIZE_2G	UINTPTR_C(0x80000000)
22
#ifndef MAX
#ifndef __ASSEMBLER__
/*
 * MAX()/MIN() evaluate each argument exactly once (GCC statement
 * expressions), so arguments with side effects are safe.
 */
#define MAX(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
			 __typeof__(b) _b = (b); \
			 _a > _b ? _a : _b; }))

#define MIN(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
			 __typeof__(b) _b = (b); \
			 _a < _b ? _a : _b; }))
#else
/* Statement expressions are not available to the assembler */
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#endif

/*
 * In some particular conditions MAX and MIN macros fail to
 * build from a C source file implementation, e.g. where a constant
 * expression is required and statement expressions are not allowed.
 * In such cases one needs to use MAX_UNSAFE/MIN_UNSAFE instead.
 * Note: the unsafe variants may evaluate their arguments more than
 * once, so do not pass expressions with side effects.
 */
#define MAX_UNSAFE(a, b) (((a) > (b)) ? (a) : (b))
#define MIN_UNSAFE(a, b) (((a) < (b)) ? (a) : (b))
47
/*
 * Number of elements in array @x. Only valid on true arrays: a pointer
 * (including an array-typed function parameter, which has decayed to a
 * pointer) silently yields a meaningless value.
 */
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
51
#ifndef __ASSEMBLER__
/*
 * Round up the even multiple of size.
 * Note: the intermediate (x) + (y) - 1 may overflow when @x is close to
 * the top of its type's range; use ROUNDUP_OVERFLOW() in that case.
 */
#define ROUNDUP(x, y) \
	((((x) + (__typeof__(x))(y) - 1) / (__typeof__(x))(y)) * \
	 (__typeof__(x))(y))

/* Round up the even multiple of size, size has to be a power of 2 */
#define ROUNDUP2(v, size) \
	(__extension__({ \
		assert(IS_POWER_OF_TWO(size)); \
		(((v) + ((__typeof__(v))(size) - 1)) & \
		 ~((__typeof__(v))(size) - 1)); \
	}))
65
/*
 * ROUNDUP_OVERFLOW(v, size, res)
 *
 * @v: Input value to round
 * @size: Rounding operand, does not have to be a power of 2
 * @res: Pointer where the rounded value is stored
 * @return: boolean overflow status (0/false or 1/true) of the rounding
 *
 * Round up value @v to the even multiple of @size and return whether the
 * result overflows the value range of *@res. The rounded value is
 * stored in the memory address pointed to by @res.
 */
#define ROUNDUP_OVERFLOW(v, size, res) \
	(__extension__({ \
		typeof(v) __roundup_mod = 0; \
		typeof(v) __roundup_add = 0; \
		\
		__roundup_mod = (v) % (typeof(v))(size); \
		if (__roundup_mod) \
			__roundup_add = (typeof(v))(size) - __roundup_mod; \
		ADD_OVERFLOW((v), __roundup_add, (res)); \
	}))
88
/*
 * ROUNDUP2_OVERFLOW(v, size, res)
 *
 * @v: Input value to round
 * @size: Rounding operand, must be a power of 2
 * @res: Pointer where the rounded value is stored
 * @return: boolean overflow status (0/false or 1/true) of the rounding
 *
 * Round up value @v to the even multiple of @size and return whether the
 * result overflows the value range of *@res. On success the rounded
 * value is stored in the memory address pointed to by @res; on overflow
 * *@res is left unmodified.
 */
#define ROUNDUP2_OVERFLOW(v, size, res) \
	(__extension__({ \
		typeof(*(res)) __roundup_tmp = 0; \
		typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
		\
		assert(IS_POWER_OF_TWO(size)); \
		ADD_OVERFLOW((v), __roundup_mask, &__roundup_tmp) ? 1 : \
			((void)(*(res) = __roundup_tmp & ~__roundup_mask), 0); \
	}))
110
/*
 * ROUNDUP2_DIV(x, y)
 *
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow (the quotient and remainder are combined instead of
 * rounding @x up first), y has to be a power of 2.
 *
 * This macro is intended to be used to convert from "number of bytes" to
 * "number of pages" or similar units. Example:
 *   num_pages = ROUNDUP2_DIV(num_bytes, SMALL_PAGE_SIZE);
 */
#define ROUNDUP2_DIV(x, y) \
	(__extension__({ \
		typeof(x) __roundup_x = (x); \
		typeof(y) __roundup_mask = (typeof(x))(y) - 1; \
		\
		assert(IS_POWER_OF_TWO(y)); \
		(__roundup_x / (y)) + (__roundup_x & __roundup_mask ? 1 : 0); \
	}))
129
/*
 * ROUNDUP_DIV(x, y)
 *
 * Rounds up to the nearest multiple of y and then divides by y. Safe
 * against overflow.
 * NOTE(review): this expands ROUNDUP(), whose intermediate addition can
 * overflow for @x near the type maximum -- see ROUNDUP() above.
 */
#define ROUNDUP_DIV(x, y) (ROUNDUP((x), (y)) / (__typeof__(x))(y))

/* Round down the even multiple of size */
#define ROUNDDOWN(x, y) (((x) / (__typeof__(x))(y)) * (__typeof__(x))(y))

/* Round down the even multiple of size, size has to be a power of 2 */
#define ROUNDDOWN2(v, size) \
	(__extension__({ \
		assert(IS_POWER_OF_TWO(size)); \
		((v) & ~((__typeof__(v))(size) - 1)); \
	}))
147
/*
 * Round up the result of x / y to the nearest upper integer if result is not
 * already an integer. Arguments may be evaluated more than once.
 */
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

/*
 * Unsigned integer division with nearest rounding variant.
 * Single evaluation of @x and @y, so side effects are safe.
 */
#define UDIV_ROUND_NEAREST(x, y) \
	(__extension__ ({ __typeof__(x) _x = (x); \
			  __typeof__(y) _y = (y); \
			  (_x + (_y / 2)) / _y; }))
#else /* __ASSEMBLER__ */
/* Assembler fallbacks: may evaluate arguments more than once */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y) (((x) / (y)) * (y))
#define UDIV_ROUND_NEAREST(x, y) (((x) + ((y) / 2)) / (y))
#endif /* __ASSEMBLER__ */
164
/*
 * x has to be of an unsigned type.
 * True for nonzero powers of two: (~(x) + 1) is the two's complement
 * negation of @x, so the AND isolates the lowest set bit.
 */
#define IS_POWER_OF_TWO(x) (((x) != 0) && (((x) & (~(x) + 1)) == (x)))

/* True if @x is a multiple of @a; @a has to be a power of 2 */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
/* True if address/value @x satisfies the alignment requirement of @type */
#define IS_ALIGNED_WITH_TYPE(x, type) \
	(__extension__({ \
		type __is_aligned_y; \
		IS_ALIGNED((uintptr_t)(x), __alignof__(__is_aligned_y)); \
	}))

/* Stringify the macro-expanded value of @x (_TO_STR stringifies as-is) */
#define TO_STR(x) _TO_STR(x)
#define _TO_STR(x) #x

/* Paste the macro-expanded values of @x and @y into one token */
#define CONCAT(x, y) _CONCAT(x, y)
#define _CONCAT(x, y) x##y

/*
 * Given @ptr pointing at field @member inside an instance of struct
 * @type, return the address of the enclosing @type object.
 */
#define container_of(ptr, type, member) \
	(__extension__({ \
		const typeof(((type *)0)->member) *__ptr = (ptr); \
		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
	}))

/* Size in bytes of @member of @type, without needing an instance */
#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
188
#ifdef __ASSEMBLER__
/*
 * Assembler variants: plain shifts, since the UINT*_C() macros and C
 * casts used below are not accepted by the assembler.
 */
#define BIT32(nr) (1 << (nr))
#define BIT64(nr) (1 << (nr))
#define SHIFT_U32(v, shift) ((v) << (shift))
#define SHIFT_U64(v, shift) ((v) << (shift))
#else
/* Single-bit masks and shifts with an explicit 32/64-bit result type */
#define BIT32(nr) (UINT32_C(1) << (nr))
#define BIT64(nr) (UINT64_C(1) << (nr))
#define SHIFT_U32(v, shift) ((uint32_t)(v) << (shift))
#define SHIFT_U64(v, shift) ((uint64_t)(v) << (shift))
#endif
/* Default bit macro is the 32-bit variant */
#define BIT(nr) BIT32(nr)
201
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h, both bounds inclusive. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 * Requires l <= h and h < width of the type.
 */
#define GENMASK_32(h, l) \
	((UINT32_C(0xffffffff) << (l)) & \
	 (UINT32_C(0xffffffff) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
213
/*
 * Checking overflow for addition, subtraction and multiplication. Result
 * of operation is stored in res which is a pointer to some kind of
 * integer.
 *
 * The macros return true if an overflow occurred and *res is undefined.
 */
#define ADD_OVERFLOW(a, b, res) __compiler_add_overflow((a), (b), (res))
#define SUB_OVERFLOW(a, b, res) __compiler_sub_overflow((a), (b), (res))
#define MUL_OVERFLOW(a, b, res) __compiler_mul_overflow((a), (b), (res))

/*
 * Return a signed +1, 0 or -1 value based on data comparison.
 * Each argument is evaluated exactly once, so side effects are safe.
 */
#define CMP_TRILEAN(a, b) \
	(__extension__({ \
		__typeof__(a) _a = (a); \
		__typeof__(b) _b = (b); \
		\
		_a > _b ? 1 : _a < _b ? -1 : 0; \
	}))
233
234 #ifndef __ASSEMBLER__
/* Combine two 32-bit register values into a 64-bit value (@reg0 = high). */
static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
{
	uint64_t high_part = (uint64_t)reg0 << 32;

	return high_part | reg1;
}
239
/* Return the most significant 32 bits of @val. */
static inline uint32_t high32_from_64(uint64_t val)
{
	return (uint32_t)(val >> 32);
}
244
/* Return the least significant 32 bits of @val (truncating conversion). */
static inline uint32_t low32_from_64(uint64_t val)
{
	return (uint32_t)val;
}
249
/*
 * Split 64-bit @val into two 32-bit register values: the high word is
 * stored in *@reg0 and the low word in *@reg1 (inverse of
 * reg_pair_to_64()).
 */
static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
				    uint32_t *reg1)
{
	*reg1 = low32_from_64(val);
	*reg0 = high32_from_64(val);
}
256
257 /*
258 * Functions to get and set bit fields in a 32/64-bit bitfield.
259 *
260 * These helper functions allow setting and extracting specific bits in
261 * a bitfield @reg according to a given mask @mask. The mask
262 * specifies which bits in the bitfield should be updated or extracted.
263 * These functions exist in both 32-bit and 64-bit versions.
264 *
265 * set_field_u32()
266 * set_field_u64() - Modifies specific bits in a bitfield by clearing
267 * the bits specified by the mask and then setting
268 * these bits to the new value @val.
269 * @reg: The original 32-bit or 64-bit bitfield value.
270 * @mask: A bitmask indicating which bits in the bitfield should be
271 * updated.
272 * @val: The new value to be loaded into the bitfield, left shifted
273 * according to @mask rightmost non-zero bit position.
274 * Returns the updated bitfield value with the specified bits set to
275 * the new value.
276 *
 * E.g. set_field_u32(0x123456, 0xf0ff00, 0xabcd) returns 0xa2cd56.
278 *
279 * get_field_u32()
280 * get_field_u64() - Extracts the value of specific bits in a bitfield
281 * by isolating the bits specified by the mask and
282 * then shifting them to the rightmost position.
283 * @reg: The original 32-bit or 64-bit bitfield value.
284 * @mask: A bitmask indicating which bits in the bitfield should be
285 * extracted.
286 * Returns the value of the bits specified by the mask, shifted to the
287 * @mask rightmost non-zero bit position.
288 *
 * E.g. get_field_u32(0x123456, 0xf0ff00) returns 0x1034.
290 */
/*
 * Extract the bits of @reg selected by @mask, shifted down so the
 * field's lowest bit lands at bit 0. @mask must be non-zero (the
 * division below would otherwise be by zero).
 */
static inline uint32_t get_field_u32(uint32_t reg, uint32_t mask)
{
	/* mask & ~(mask - 1) isolates the lowest set bit of @mask */
	uint32_t lowest_bit = mask & ~(mask - 1);

	return (reg & mask) / lowest_bit;
}
295
/*
 * Insert @val into the bits of @reg selected by @mask.
 *
 * @val is shifted left so its bit 0 lands on the lowest set bit of
 * @mask, then truncated to @mask so an oversized @val cannot corrupt
 * bits outside the field. The truncation is required to match the
 * documented example: set_field_u32(0x123456, 0xf0ff00, 0xabcd) must
 * return 0xa2cd56 (without it the stray 0x0b0000 bits would leak
 * through, yielding 0xabcd56). @mask must be non-zero.
 */
static inline uint32_t set_field_u32(uint32_t reg, uint32_t mask, uint32_t val)
{
	/* mask & ~(mask - 1) isolates the lowest set bit of @mask */
	return (reg & ~mask) | ((val * (mask & ~(mask - 1))) & mask);
}
300
/*
 * 64-bit variant of get_field_u32(): extract the bits of @reg selected
 * by @mask, shifted down to bit 0. @mask must be non-zero.
 */
static inline uint64_t get_field_u64(uint64_t reg, uint64_t mask)
{
	/* mask & ~(mask - 1) isolates the lowest set bit of @mask */
	uint64_t lowest_bit = mask & ~(mask - 1);

	return (reg & mask) / lowest_bit;
}
305
/*
 * 64-bit variant of set_field_u32(): insert @val into the bits of @reg
 * selected by @mask.
 *
 * As in set_field_u32(), the shifted @val is truncated to @mask so an
 * oversized @val cannot corrupt bits outside the field (keeps the
 * behavior consistent with the documented 32-bit example). @mask must
 * be non-zero.
 */
static inline uint64_t set_field_u64(uint64_t reg, uint64_t mask, uint64_t val)
{
	/* mask & ~(mask - 1) isolates the lowest set bit of @mask */
	return (reg & ~mask) | ((val * (mask & ~(mask - 1))) & mask);
}
310
/*
 * Helper functions wrapping qsort() for arrays of the standard integer
 * types: @aa points to the array, @n is the number of elements.
 * NOTE(review): sort order presumed ascending -- the implementations
 * are in the corresponding .c file; confirm there.
 */
void qsort_int(int *aa, size_t n);
void qsort_uint(unsigned int *aa, size_t n);
void qsort_long(long int *aa, size_t n);
void qsort_ul(unsigned long int *aa, size_t n);
void qsort_ll(long long int *aa, size_t n);
void qsort_ull(unsigned long long int *aa, size_t n);
void qsort_s8(int8_t *aa, size_t n);
void qsort_u8(uint8_t *aa, size_t n);
void qsort_s16(int16_t *aa, size_t n);
void qsort_u16(uint16_t *aa, size_t n);
void qsort_s32(int32_t *aa, size_t n);
void qsort_u32(uint32_t *aa, size_t n);
void qsort_s64(int64_t *aa, size_t n);
void qsort_u64(uint64_t *aa, size_t n);
326 #endif
327
328 #endif /*UTIL_H*/
329