1 /*
2  * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
3  * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 #ifndef UTILS_DEF_H
9 #define UTILS_DEF_H
10 
11 #include <export/lib/utils_def_exp.h>
12 
/*
 * Number of elements in a statically-declared array. Only valid when `a`
 * has a true array type — a pointer parameter would silently yield
 * sizeof(pointer) / sizeof(element) instead.
 */
#define ARRAY_SIZE(a)				\
	(sizeof(a) / sizeof(*(a)))
16 
/*
 * True when x has at most one bit set. Note that x == 0 (no bits set)
 * also satisfies this test.
 */
#define IS_POWER_OF_TWO(x)			\
	((((x) - 1) & (x)) == 0)
19 
20 #define SIZE_FROM_LOG2_WORDS(n)		(U(4) << (n))
21 
/* Single-bit masks, usable from C, assembler and linker scripts. */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
/* NOTE(review): C cast syntax is not valid in assembler/linker context;
 * U()/ULL() (from the exported utils_def_exp.h) expand to plain literal
 * forms there. */
#define BIT_32(nr)			(U(1) << (nr))
#define BIT_64(nr)			(ULL(1) << (nr))
#else
#define BIT_32(nr)			(((uint32_t)(1U)) << (nr))
#define BIT_64(nr)			(((uint64_t)(1ULL)) << (nr))
#endif

/* BIT() follows the native register width of the target architecture. */
#ifdef __aarch64__
#define BIT				BIT_64
#else
#define BIT				BIT_32
#endif
35 
/*
 * Create a contiguous bitmask starting at bit position @low and ending at
 * position @high. For example
 * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 */
#if defined(__LINKER__) || defined(__ASSEMBLER__)
/*
 * Assembler/linker-script variants: no casts or integer suffixes.
 * NOTE(review): these rely on the assembler/linker evaluating the
 * expressions as 64-bit values with a zero-filling right shift; in
 * plain C, `~0 >> n` would be an arithmetic shift of a signed value.
 * Confirm these are only expanded in assembler/linker context.
 */
#define GENMASK_32(high, low) \
	(((0xFFFFFFFF) << (low)) & (0xFFFFFFFF >> (32 - 1 - (high))))

#define GENMASK_64(high, low) \
	((~0 << (low)) & (~0 >> (64 - 1 - (high))))
#else
/*
 * C variants: "all bits up to @high" XOR "all bits below @low" leaves
 * bits @low..@high set. Built this way so no shift ever reaches the
 * full type width (undefined behaviour), as long as high >= low.
 */
#define GENMASK_32(high, low) \
	((~UINT32_C(0) >> (32U - 1U - (high))) ^ ((BIT_32(low) - 1U)))

#define GENMASK_64(high, low) \
	((~UINT64_C(0) >> (64U - 1U - (high))) ^ ((BIT_64(low) - 1U)))
#endif

/* GENMASK() follows the native register width of the architecture. */
#ifdef __aarch64__
#define GENMASK				GENMASK_64
#else
#define GENMASK				GENMASK_32
#endif
60 
/*
 * Extract the high/low 32 bits of a 64-bit value. The argument is fully
 * parenthesised so that compound expressions (e.g. HI(a | b)) are
 * evaluated as a whole before the shift/mask is applied; previously the
 * shift/mask bound only to part of the expression.
 * NOTE(review): HI() assumes `addr` has a 64-bit type — shifting a
 * 32-bit value by 32 would be undefined behaviour.
 */
#define HI(addr)			((addr) >> 32)
#define LO(addr)			((addr) & 0xffffffff)
63 
/*
 * Ceiling division. Usable inside other macro definitions, but avoid it
 * in normal C code: the `d` parameter is expanded (and evaluated) twice.
 * Use div_round_up() there instead.
 */
#define DIV_ROUND_UP_2EVAL(n, d)	(((n) + ((d) - 1)) / (d))
69 
/*
 * Ceiling division with single evaluation of the divisor: `div` is
 * captured in a temporary, so side-effecting arguments are safe here
 * (unlike DIV_ROUND_UP_2EVAL).
 */
#define div_round_up(val, div) __extension__ ({	\
	__typeof__(div) _divisor = (div);	\
	((val) + _divisor - (__typeof__(div)) 1) / _divisor;	\
})
74 
/*
 * Return the smaller of two values. Each argument is evaluated exactly
 * once; the dead address comparison makes the compiler warn when the
 * two arguments have different types.
 */
#define MIN(x, y) __extension__ ({	\
	__typeof__(x) _a = (x);		\
	__typeof__(y) _b = (y);		\
	(void)(&_a == &_b);		\
	(_a < _b) ? _a : _b;		\
})
81 
/*
 * Return the larger of two values. Each argument is evaluated exactly
 * once; the dead address comparison makes the compiler warn when the
 * two arguments have different types.
 */
#define MAX(x, y) __extension__ ({	\
	__typeof__(x) _a = (x);		\
	__typeof__(y) _b = (y);		\
	(void)(&_a == &_b);		\
	(_a > _b) ? _a : _b;		\
})
88 
/*
 * Clamp a value into the inclusive range [min, max]. Each argument is
 * evaluated exactly once; the dead address comparisons make the
 * compiler warn when the bounds' types differ from the value's type.
 * The upper bound is tested first, matching the historical behaviour
 * when min > max.
 */
#define CLAMP(x, min, max) __extension__ ({ \
	__typeof__(x) _val = (x); \
	__typeof__(min) _lo = (min); \
	__typeof__(max) _hi = (max); \
	(void)(&_val == &_lo); \
	(void)(&_val == &_hi); \
	((_val > _hi) ? _hi : ((_val < _lo) ? _lo : _val)); \
})
97 
/*
 * The round_up() macro rounds up a value to the given boundary in a
 * type-agnostic yet type-safe manner. The boundary must be a power of two.
 * In other words, it computes the smallest multiple of boundary which is
 * greater than or equal to value.
 *
 * round_down() is similar but rounds the value down instead.
 */

/* Low-bit mask of a power-of-two boundary, cast to the value's type. */
#define round_boundary(value, boundary)		\
	((__typeof__(value))((boundary) - 1))

/* Setting all the mask bits of (value - 1) and adding one lands on the
 * next boundary; an already-aligned value maps onto itself. */
#define round_up(value, boundary)		\
	((((value) - 1) | round_boundary(value, boundary)) + 1)

/* Clearing the mask bits drops back to the previous boundary. */
#define round_down(value, boundary)		\
	((value) & ~round_boundary(value, boundary))
114 
/*
 * Add two values while checking whether the operation overflowed.
 * The sum is stored in '*res';
 * return 0 on success and 1 on overflow
 */
#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))

/*
 * Round up a value to align with a given (power-of-two) size and
 * check whether overflow happens.
 * The rounded-up value is stored in '*res' (left unmodified on failure);
 * return 0 on success and 1 on overflow
 *
 * The comma expression in the failure-free arm must be parenthesised:
 * the comma operator binds more loosely than ?:, so without the parens
 * the statement would parse as `(cond ? 1 : (void)store), 0` and the
 * macro would evaluate to 0 even when the addition overflowed.
 * __typeof__ (rather than typeof) keeps the header usable in strict
 * ISO (-std=c11) builds.
 */
#define round_up_overflow(v, size, res) (__extension__({ \
	__typeof__(res) __res = (res); \
	__typeof__(*(__res)) __roundup_tmp = 0; \
	__typeof__(v) __roundup_mask = (__typeof__(v))(size) - 1; \
	\
	add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
		((void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0); \
}))

/*
 * Add a and b, then round the sum up to align with a given
 * (power-of-two) size, checking for overflow at each step.
 * The result is stored in '*res';
 * return 0 on success and 1 on overflow
 */
#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
	__typeof__(a) __a = (a); \
	__typeof__(__a) __add_res = 0; \
	\
	add_overflow((__a), (b), &__add_res) ? 1 : \
		(round_up_overflow(__add_res, (size), (res)) ? 1 : 0); \
}))
149 
/**
 * Helper macro to check that a value lies on a given boundary.
 * The boundary must be a power of two (see round_up()/round_down()).
 * `value` is fully parenthesised so that compound expressions, e.g.
 * is_aligned(base + offset, 8), are cast to uintptr_t as a whole;
 * previously the cast bound only to the first operand of the argument.
 */
#define is_aligned(value, boundary)			\
	(round_up((uintptr_t)(value), boundary) ==	\
	 round_down((uintptr_t)(value), boundary))
156 
/*
 * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
 * Both arguments must be unsigned pointer values (i.e. uintptr_t).
 * Formulated as: the increment exceeds the headroom remaining above
 * the pointer value.
 */
#define check_uptr_overflow(_ptr, _inc)		\
	((_inc) > (UINTPTR_MAX - (_ptr)))
163 
/*
 * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
 * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
 * Formulated as: the increment exceeds the headroom remaining above
 * the current value.
 */
#define check_u32_overflow(_u32, _inc) \
	((_inc) > (UINT32_MAX - (_u32)))
170 
/* Register size of the current architecture, in bytes.
 * (U() comes from the exported utils_def_exp.h) */
#ifdef __aarch64__
#define REGSZ		U(8)	/* AArch64: 8-byte general-purpose registers */
#else
#define REGSZ		U(4)	/* AArch32: 4-byte general-purpose registers */
#endif
177 
/*
 * Test for the current architecture version to be at least the version
 * expected: true when the major number exceeds the requested one, or
 * matches it with a minor number that is not below the requested one.
 * ARM_ARCH_MAJOR/ARM_ARCH_MINOR are provided by the build system.
 */
#define ARM_ARCH_AT_LEAST(_maj, _min) \
	((ARM_ARCH_MAJOR >= (_maj)) && \
	 ((ARM_ARCH_MAJOR > (_maj)) || (ARM_ARCH_MINOR >= (_min))))
185 
/*
 * Import an assembly or linker symbol as a C expression with the specified
 * type.
 * The symbol's *address* (not any data stored at it) becomes the value of
 * `name`: the extern char array decays to a pointer to the symbol, which
 * is then cast to `type`. The constant is marked unused so translation
 * units that never reference `name` compile cleanly.
 */
#define IMPORT_SYM(type, sym, name) \
	extern char sym[];\
	static const __attribute__((unused)) type name = (type) sym;
193 
/*
 * When the symbol is used to hold a pointer, its alignment can be asserted
 * with this macro. For example, if there is a linker symbol that is going to
 * be used as a 64-bit pointer, the value of the linker symbol must also be
 * aligned to 64 bit. This macro makes sure this is the case.
 * NOTE(review): relies on assert() being in scope at the point of use;
 * this header does not include <assert.h> itself.
 */
#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
201 
202 #define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
203 
/* Compiler builtin of GCC >= 9 and planned in llvm.
 * When available, the builtin yields a value that is safe to use under
 * speculative execution; otherwise this degrades to a plain expansion
 * of the argument (no hardening). */
#ifdef __HAVE_SPECULATION_SAFE_VALUE
# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
#else
# define SPECULATION_SAFE_VALUE(var) var
#endif
210 
/*
 * Ticks elapsed in one second with a signal of 1 MHz
 * (U() comes from the exported utils_def_exp.h)
 */
#define MHZ_TICKS_PER_SEC	U(1000000)

/*
 * Ticks elapsed in one second with a signal of 1 kHz
 */
#define KHZ_TICKS_PER_SEC U(1000)
220 
221 #endif /* UTILS_DEF_H */
222