/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef UTIL_H
#define UTIL_H

#include <compiler.h>
#include <inttypes.h>

#define SIZE_4K	UINTPTR_C(0x1000)
#define SIZE_1M	UINTPTR_C(0x100000)
#define SIZE_2M	UINTPTR_C(0x200000)
#define SIZE_4M	UINTPTR_C(0x400000)
#define SIZE_8M	UINTPTR_C(0x800000)
#define SIZE_2G	UINTPTR_C(0x80000000)

#ifndef MAX
#ifndef __ASSEMBLER__
#define MAX(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
			 __typeof__(b) _b = (b); \
			 _a > _b ? _a : _b; }))

#define MIN(a, b) \
	(__extension__({ __typeof__(a) _a = (a); \
			 __typeof__(b) _b = (b); \
			 _a < _b ? _a : _b; }))
#else
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#endif
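
/*
 * Usage sketch (illustration only, not part of this header's API): the
 * statement-expression variants evaluate each argument exactly once, so
 * arguments with side effects are safe:
 *
 *	size_t n = MIN(len, sizeof(buf));
 *	int hi = MAX(i++, j);	// i is incremented once, not twice
 */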

/*
 * In some conditions the MAX and MIN macros above fail to build from a C
 * source file, for instance because GNU statement expressions are not
 * allowed in constant expressions at file scope. In such cases one needs
 * to use MAX_UNSAFE/MIN_UNSAFE instead.
 */
#define MAX_UNSAFE(a, b) (((a) > (b)) ? (a) : (b))
#define MIN_UNSAFE(a, b) (((a) < (b)) ? (a) : (b))
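
/*
 * Illustrative example (hypothetical identifiers): a static array size
 * must be a constant expression, so it needs the unsafe variants:
 *
 *	static uint8_t msg_buf[MAX_UNSAFE(CFG_MSG_SZ_A, CFG_MSG_SZ_B)];
 */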

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#ifndef __ASSEMBLER__
/* Round up v to the nearest multiple of size; size has to be a power of 2 */
#define ROUNDUP(v, size) (((v) + ((__typeof__(v))(size) - 1)) & \
			  ~((__typeof__(v))(size) - 1))

#define ROUNDUP_OVERFLOW(v, size, res) (__extension__({ \
	typeof(*(res)) __roundup_tmp = 0; \
	typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
	\
	ADD_OVERFLOW((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(res) = __roundup_tmp & ~__roundup_mask), 0); \
}))
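
/*
 * Usage sketch (illustrative): ROUNDUP_OVERFLOW() reports failure instead
 * of silently wrapping when v is close to the maximum of its type:
 *
 *	size_t aligned_sz = 0;
 *
 *	if (ROUNDUP_OVERFLOW(sz, SIZE_4K, &aligned_sz))
 *		return TEE_ERROR_OVERFLOW;	// hypothetical error path
 */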

/*
 * Rounds x up to the nearest multiple of y and then divides by y. Safe
 * against overflow; y has to be a power of 2.
 *
 * This macro is intended to be used to convert from "number of bytes" to
 * "number of pages" or similar units. Example:
 * num_pages = ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE);
 */
#define ROUNDUP_DIV(x, y) (__extension__({ \
	typeof(x) __roundup_x = (x); \
	typeof(y) __roundup_mask = (typeof(x))(y) - 1; \
	\
	(__roundup_x / (y)) + (__roundup_x & __roundup_mask ? 1 : 0); \
}))

/* Round down v to the nearest multiple of size; size has to be a power of 2 */
#define ROUNDDOWN(v, size) ((v) & ~((__typeof__(v))(size) - 1))
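
/*
 * Worked example: with size = SIZE_4K (0x1000),
 *	ROUNDUP(0x1234, 0x1000)   == 0x2000
 *	ROUNDDOWN(0x1234, 0x1000) == 0x1000
 */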

/*
 * Round up the result of x / y to the nearest upper integer if result is not
 * already an integer.
 */
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

/* Unsigned integer division with nearest rounding variant */
#define UDIV_ROUND_NEAREST(x, y) \
	(__extension__ ({ __typeof__(x) _x = (x); \
			  __typeof__(y) _y = (y); \
			  (_x + (_y / 2)) / _y; }))
#else
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))
#define ROUNDDOWN(x, y) (((x) / (y)) * (y))
#define UDIV_ROUND_NEAREST(x, y) (((x) + ((y) / 2)) / (y))
#endif
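
/*
 * Worked example contrasting the two division helpers:
 *	DIV_ROUND_UP(9, 4)        == 3	(always rounds up)
 *	UDIV_ROUND_NEAREST(9, 4)  == 2	(9/4 = 2.25 rounds to nearest)
 *	UDIV_ROUND_NEAREST(10, 4) == 3	(10/4 = 2.5 rounds up)
 */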

/* x has to be of an unsigned type */
#define IS_POWER_OF_TWO(x) (((x) != 0) && (((x) & (~(x) + 1)) == (x)))
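
/*
 * (~(x) + 1) is the two's complement negation of x, so (x) & (~(x) + 1)
 * isolates the lowest set bit; it equals x only when exactly one bit is
 * set. For example:
 *	IS_POWER_OF_TWO(0x40U) is true
 *	IS_POWER_OF_TWO(0x60U) is false
 */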

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define IS_ALIGNED_WITH_TYPE(x, type) \
	(__extension__({ \
		type __is_aligned_y; \
		IS_ALIGNED((uintptr_t)(x), __alignof__(__is_aligned_y)); \
	}))

#define TO_STR(x) _TO_STR(x)
#define _TO_STR(x) #x

#define CONCAT(x, y) _CONCAT(x, y)
#define _CONCAT(x, y) x##y

#define container_of(ptr, type, member) \
	(__extension__({ \
		const typeof(((type *)0)->member) *__ptr = (ptr); \
		(type *)((unsigned long)(__ptr) - offsetof(type, member)); \
	}))
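
/*
 * Usage sketch (hypothetical struct, for illustration only): recover a
 * pointer to the enclosing structure from a pointer to one of its
 * members:
 *
 *	struct item {
 *		int val;
 *		struct list_head link;
 *	};
 *
 *	struct list_head *lh = get_next(); // hypothetical helper
 *	struct item *it = container_of(lh, struct item, link);
 */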

#define MEMBER_SIZE(type, member) sizeof(((type *)0)->member)

#ifdef __ASSEMBLER__
#define BIT32(nr) (1 << (nr))
#define BIT64(nr) (1 << (nr))
#define SHIFT_U32(v, shift) ((v) << (shift))
#define SHIFT_U64(v, shift) ((v) << (shift))
#else
#define BIT32(nr) (UINT32_C(1) << (nr))
#define BIT64(nr) (UINT64_C(1) << (nr))
#define SHIFT_U32(v, shift) ((uint32_t)(v) << (shift))
#define SHIFT_U64(v, shift) ((uint64_t)(v) << (shift))
#endif
#define BIT(nr) BIT32(nr)

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#define GENMASK_32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))

#define GENMASK_64(h, l) \
	(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))

/*
 * Macros for checking overflow on addition, subtraction and
 * multiplication. The result of the operation is stored in *res, which
 * is a pointer to some kind of integer.
 *
 * The macros return true if an overflow occurred, in which case the
 * value stored in *res is undefined.
 */
#define ADD_OVERFLOW(a, b, res) __compiler_add_overflow((a), (b), (res))
#define SUB_OVERFLOW(a, b, res) __compiler_sub_overflow((a), (b), (res))
#define MUL_OVERFLOW(a, b, res) __compiler_mul_overflow((a), (b), (res))
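
/*
 * Usage sketch (illustrative): validate the sum of two externally
 * supplied sizes before trusting it:
 *
 *	size_t total = 0;
 *
 *	if (ADD_OVERFLOW(hdr_len, payload_len, &total))
 *		return TEE_ERROR_OVERFLOW;	// hypothetical error path
 */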

/* Return a signed +1, 0 or -1 value based on data comparison */
#define CMP_TRILEAN(a, b) \
	(__extension__({ \
		__typeof__(a) _a = (a); \
		__typeof__(b) _b = (b); \
		\
		_a > _b ? 1 : _a < _b ? -1 : 0; \
	}))
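
/*
 * Usage sketch (hypothetical comparator, for illustration only):
 * CMP_TRILEAN() yields exactly the contract expected by qsort()-style
 * callbacks:
 *
 *	static int cmp_vals(const void *a, const void *b)
 *	{
 *		return CMP_TRILEAN(*(const int *)a, *(const int *)b);
 *	}
 */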

#ifndef __ASSEMBLER__
static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1)
{
	return (uint64_t)reg0 << 32 | reg1;
}

static inline void reg_pair_from_64(uint64_t val, uint32_t *reg0,
				    uint32_t *reg1)
{
	*reg0 = val >> 32;
	*reg1 = val;
}
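
/*
 * Worked example: the two helpers are inverses of each other, with reg0
 * holding the high word:
 *
 *	uint32_t hi = 0, lo = 0;
 *
 *	reg_pair_from_64(0x1122334455667788ULL, &hi, &lo);
 *	// hi == 0x11223344, lo == 0x55667788
 *	// reg_pair_to_64(hi, lo) == 0x1122334455667788ULL
 */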

/* Get and set bit fields */
static inline uint32_t get_field_u32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static inline uint32_t set_field_u32(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val * (mask & ~(mask - 1)));
}

static inline uint64_t get_field_u64(uint64_t reg, uint64_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static inline uint64_t set_field_u64(uint64_t reg, uint64_t mask, uint64_t val)
{
	return (reg & ~mask) | (val * (mask & ~(mask - 1)));
}
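
/*
 * Worked example: (mask & ~(mask - 1)) isolates the lowest set bit of
 * the mask, i.e. the field's shift expressed as a multiplier. With
 * mask = GENMASK_32(23, 16) (0x00ff0000):
 *
 *	get_field_u32(0x00503000, GENMASK_32(23, 16))       == 0x50
 *	set_field_u32(0x00503000, GENMASK_32(23, 16), 0xab) == 0x00ab3000
 */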
#endif

#endif /*UTIL_H*/