/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#define PASTE(a, b) a ## b
#define _STRINGIFY(a) #a
#define STRINGIFY(a) _STRINGIFY(a)

#ifdef __ASSEMBLER__

/* Provide a helper macro to define integer constants that are not of the
 * default type 'int', but 'unsigned long'. When such constants are shared
 * between assembly and C code, some assemblers will fail because they do not
 * support C-style integer suffixes like 'ul'. Using a macro works around this,
 * as the suffix is only applied when the C compiler is used and dropped when
 * the assembler runs.
 */
#define UL_CONST(x) x
#define ULL_CONST(x) x
#define NULL 0

#else /* not __ASSEMBLER__ */

/* There is no difference between using 'ul' or 'lu' as a suffix for numbers to
 * enforce a specific type besides the default 'int'. However, when it comes to
 * the printf() format specifiers, '%lu' is the only form that is supported.
 * Thus 'ul' is the preferred suffix to avoid confusion.
 */
#define UL_CONST(x) PASTE(x, ul)
#define ULL_CONST(x) PASTE(x, llu)
#define NULL ((void *)0)

#endif /* [not] __ASSEMBLER__ */
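
/* For example, a constant shared between assembly and C, such as the purely
 * illustrative
 *     #define EXAMPLE_STACK_SIZE UL_CONST(4096)
 * expands to 4096ul when compiled as C, but to plain 4096 when included from
 * assembly, where the 'ul' suffix would be rejected.
 */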

#define BIT(n) (UL_CONST(1) << (n))
#define MASK(n) (BIT(n) - UL_CONST(1))
#define IS_ALIGNED(n, b) (!((n) & MASK(b)))
#define ROUND_DOWN(n, b) (((n) >> (b)) << (b))
#define ROUND_UP(n, b) (((((n) - UL_CONST(1)) >> (b)) + UL_CONST(1)) << (b))
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
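
/* Example with b = 12 (4 KiB pages): BIT(12) is 0x1000, MASK(12) is 0xfff,
 * IS_ALIGNED(0x2000, 12) holds, ROUND_DOWN(0x1234, 12) is 0x1000 and
 * ROUND_UP(0x1234, 12) is 0x2000.
 */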

/* Time constants are defined to use 'unsigned long long'. The rationale is
 * that the C rules make the result type of a calculation the largest type
 * involved. Enforcing the largest type C provides avoids pitfalls with 32-bit
 * overflows when values get large. Keep in mind that even 2^32 milliseconds
 * rolls over within 50 days, which is an uptime that embedded systems reach
 * easily and which is not even two months in a calendar calculation. In
 * addition, using the largest integer type C currently defines forces all
 * calculation results to be cast back to a 32-bit type explicitly. This might
 * feel annoying, but in practice it makes code more robust and enforces
 * thinking about potential overflows.
 * Note that at this stage of the includes, the type uint64_t has not been
 * defined yet, so we cannot use any definitions around it, but have to stick
 * to plain C types. Neither moving the time constant definitions behind the
 * uint64_t type definitions nor including the header with the uint64_t
 * definitions here is currently a feasible option.
 */
#define MS_IN_S     ULL_CONST(1000)
#define US_IN_MS    ULL_CONST(1000)
#define HZ_IN_KHZ   ULL_CONST(1000)
#define KHZ_IN_MHZ  ULL_CONST(1000)
#define HZ_IN_MHZ   ULL_CONST(1000000)
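
/* For example, converting an uptime held in a 32-bit variable from seconds to
 * microseconds (variable names purely illustrative)
 *     us = uptime_secs * MS_IN_S * US_IN_MS;
 * is carried out in 'unsigned long long', because the first multiplication
 * already involves the 64-bit constant MS_IN_S, so the intermediate result
 * cannot silently overflow 32 bits.
 */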

#ifndef __ASSEMBLER__

#define PACKED __attribute__((packed))
#define NORETURN __attribute__((__noreturn__))
#define CONST __attribute__((__const__))
#define PURE __attribute__((__pure__))
#define ALIGN(n) __attribute__((__aligned__(n)))
#define FASTCALL __attribute__((fastcall))
#ifdef __clang__
#define VISIBLE /* nothing */
#else
#define VISIBLE __attribute__((externally_visible))
#endif
#define NO_INLINE __attribute__((noinline))
#define FORCE_INLINE __attribute__((always_inline))
#define SECTION(sec) __attribute__((__section__(sec)))
#define UNUSED __attribute__((unused))
#define USED __attribute__((used))
#ifdef __clang__
#define FORCE_O2 /* nothing */
#else
#define FORCE_O2 __attribute__((optimize("O2")))
#endif
/** MODIFIES: */
void __builtin_unreachable(void);
#define UNREACHABLE() __builtin_unreachable()
#define MAY_ALIAS __attribute__((may_alias))

#define OFFSETOF(type, member) __builtin_offsetof(type, member)

#ifdef __GNUC__
/* Borrowed from linux/include/linux/compiler.h */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   (!!(x))
#define unlikely(x) (!!(x))
#endif

/* Needed for compiling with c99 instead of gnu99 */
#define asm __asm__

/* Evaluate a Kconfig-provided configuration setting at compile-time. */
#define config_set(macro) _is_set_(macro)
#define _macrotest_1 ,
#define _is_set_(value) _is_set__(_macrotest_##value)
#define _is_set__(comma) _is_set___(comma 1, 0)
#define _is_set___(_, v, ...) v
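
/* Example expansion: if CONFIG_FOO is defined to 1, config_set(CONFIG_FOO)
 * pastes to _macrotest_1, which expands to a comma, so _is_set___ sees the
 * argument list (, 1, 0) and selects its second argument, 1. If CONFIG_FOO is
 * undefined or defined to something other than 1, no comma is produced and the
 * result is 0. (CONFIG_FOO is just an illustrative name.)
 */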

/* Check the existence of a configuration setting, returning one value if it
 * exists and a different one if it does not. */
#define config_ternary(macro, true, false) _config_ternary(macro, true, false)
#define _config_ternary(value, true, false) _config_ternary_(_macrotest_##value, true, false)
#define _config_ternary_(comma, true, false) _config_ternary__(comma true, false)
#define _config_ternary__(_, v, ...) v
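
/* Example: config_ternary(CONFIG_FOO, 64, 32) evaluates to 64 if CONFIG_FOO is
 * defined to 1 and to 32 otherwise, using the same comma-pasting trick as
 * config_set() above.
 */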

/** MODIFIES:
    FNSPEC
        halt_spec: "\<Gamma> \<turnstile> {} Call halt_'proc {}"
*/
void halt(void) NORETURN;
void memzero(void *s, unsigned long n);
void *memset(void *s, unsigned long c, unsigned long n) VISIBLE;
void *memcpy(void *ptr_dst, const void *ptr_src, unsigned long n) VISIBLE;
int PURE strncmp(const char *s1, const char *s2, int n);
long CONST char_to_long(char c);
long PURE str_to_long(const char *str);

/* Library functions for counting leading/trailing zeros.
 *
 * GCC/LLVM provides builtin functions like __builtin_clzl() for this, which
 * either get translated to machine-specific instructions or call helper
 * functions like __clzsi2() that a compiler library is expected to implement.
 * At the time of writing this comment, the GCC documentation about the compiler
 * library (https://gcc.gnu.org/onlinedocs/gccint/Integer-library-routines.html)
 * is not very detailed and the signatures given for these helper functions
 * appear incorrect. For example, it says "int __clzsi2(unsigned int a)", but
 * both the GCC and LLVM libraries implement it in a way that is independent of
 * the implementation choices for the size of `unsigned int`. Instead, it
 * appears that `si` always signifies a 32-bit argument and `di` always
 * signifies a 64-bit argument. Tests with __builtin_clzl() on RISC-V have shown
 * that if 'unsigned long' is 32 bits, __builtin_clzl() uses __clzsi2(), and if
 * the type is 64 bits, __builtin_clzl() uses __clzdi2(). Thus using the types
 * uint32_t and uint64_t from stdint.h in the signatures below is considered the
 * semantically correct way.
 * Note that we only emit actual function implementations for these functions if
 * CONFIG_CLZ_32 etc. are set. Otherwise, the compiler's internal implementation
 * may get used or compilation fails if there is no machine instruction.
 */
#include <stdint.h>
CONST int __clzsi2(uint32_t x);
CONST int __clzdi2(uint64_t x);
CONST int __ctzsi2(uint32_t x);
CONST int __ctzdi2(uint64_t x);

// Used for compile-time constants, so should always use the builtin.
#define CTZL(x) __builtin_ctzl(x)

// Count leading zeros.
// The CONFIG_CLZ_NO_BUILTIN macro may be used to expose the library function
// to the C parser for verification.
#ifndef CONFIG_CLZ_NO_BUILTIN
// If we use a compiler builtin, we cannot verify it, so we use the following
// annotations to hide the function body from the proofs, and axiomatise its
// behaviour.
// On the other hand, if we use our own implementation instead of the builtin,
// then we want to expose that implementation to the proofs, and therefore hide
// these annotations.
/** MODIFIES: */
/** DONT_TRANSLATE */
/** FNSPEC clzl_spec:
    "\<forall>s. \<Gamma> \<turnstile>
        {\<sigma>. s = \<sigma> \<and> x___unsigned_long_' s \<noteq> 0 }
            \<acute>ret__long :== PROC clzl(\<acute>x)
        \<lbrace> \<acute>ret__long = of_nat (word_clz (x___unsigned_long_' s)) \<rbrace>"
*/
#endif
static inline long
CONST clzl(unsigned long x)
{
#ifdef CONFIG_CLZ_NO_BUILTIN
#if CONFIG_WORD_SIZE == 32
    return __clzsi2(x);
#else
    return __clzdi2(x);
#endif
#else
    return __builtin_clzl(x);
#endif
}

#ifndef CONFIG_CLZ_NO_BUILTIN
// See comments on clzl.
/** MODIFIES: */
/** DONT_TRANSLATE */
/** FNSPEC clzll_spec:
    "\<forall>s. \<Gamma> \<turnstile>
        {\<sigma>. s = \<sigma> \<and> x___unsigned_longlong_' s \<noteq> 0 }
            \<acute>ret__longlong :== PROC clzll(\<acute>x)
        \<lbrace> \<acute>ret__longlong = of_nat (word_clz (x___unsigned_longlong_' s)) \<rbrace>"
*/
#endif
static inline long long
CONST clzll(unsigned long long x)
{
#ifdef CONFIG_CLZ_NO_BUILTIN
    return __clzdi2(x);
#else
    return __builtin_clzll(x);
#endif
}

// Count trailing zeros.
#ifndef CONFIG_CTZ_NO_BUILTIN
// See comments on clzl.
/** MODIFIES: */
/** DONT_TRANSLATE */
/** FNSPEC ctzl_spec:
    "\<forall>s. \<Gamma> \<turnstile>
        {\<sigma>. s = \<sigma> \<and> x___unsigned_long_' s \<noteq> 0 }
            \<acute>ret__long :== PROC ctzl(\<acute>x)
        \<lbrace> \<acute>ret__long = of_nat (word_ctz (x___unsigned_long_' s)) \<rbrace>"
*/
#endif
static inline long
CONST ctzl(unsigned long x)
{
#ifdef CONFIG_CTZ_NO_BUILTIN
    // If there is a builtin CLZ, but no builtin CTZ, then CTZ will be implemented
    // using the builtin CLZ, rather than the long-form implementation.
    // This is typically the fastest way to calculate ctzl on such platforms.
#ifdef CONFIG_CLZ_NO_BUILTIN
    // Here, there are no builtins we can use, so call the library function.
#if CONFIG_WORD_SIZE == 32
    return __ctzsi2(x);
#else
    return __ctzdi2(x);
#endif
#else
    // Here, we have __builtin_clzl, but no __builtin_ctzl.
    if (unlikely(x == 0)) {
        return 8 * sizeof(unsigned long);
    }
    // -x = ~x + 1, so (x & -x) isolates the least significant 1-bit of x,
    // allowing ctzl to be calculated from clzl and the word size.
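    // For example, with x = 0b10100 (decimal 20): x & -x is 0b100, clzl of
    // that on a 64-bit word is 61, and 64 - 1 - 61 = 2, which is ctzl(20).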
    return 8 * sizeof(unsigned long) - 1 - __builtin_clzl(x & -x);
#endif
#else
    // Here, we have __builtin_ctzl.
    return __builtin_ctzl(x);
#endif
}

#ifndef CONFIG_CTZ_NO_BUILTIN
// See comments on clzl.
/** MODIFIES: */
/** DONT_TRANSLATE */
/** FNSPEC ctzll_spec:
    "\<forall>s. \<Gamma> \<turnstile>
        {\<sigma>. s = \<sigma> \<and> x___unsigned_longlong_' s \<noteq> 0 }
            \<acute>ret__longlong :== PROC ctzll(\<acute>x)
        \<lbrace> \<acute>ret__longlong = of_nat (word_ctz (x___unsigned_longlong_' s)) \<rbrace>"
*/
#endif
static inline long long
CONST ctzll(unsigned long long x)
{
#ifdef CONFIG_CTZ_NO_BUILTIN
    // See comments on ctzl.
#ifdef CONFIG_CLZ_NO_BUILTIN
    return __ctzdi2(x);
#else
    if (unlikely(x == 0)) {
        return 8 * sizeof(unsigned long long);
    }
    // See comments on ctzl.
    return 8 * sizeof(unsigned long long) - 1 - __builtin_clzll(x & -x);
#endif
#else
    return __builtin_ctzll(x);
#endif
}
int __builtin_popcountl(unsigned long x);

/** DONT_TRANSLATE */
static inline long
CONST popcountl(unsigned long mask)
{
#ifndef __POPCNT__
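    // Kernighan's bit-counting loop: clearing the least significant set bit on
    // each iteration means the loop body runs exactly once per set bit, e.g.
    // mask = 0x29 (0b101001) takes three iterations and returns 3.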
    unsigned int count; // accumulates the total number of bits set in mask
    for (count = 0; mask; count++) {
        mask &= mask - 1; // clear the least significant bit set
    }

    return count;
#else
    return __builtin_popcountl(mask);
#endif
}

#define POPCOUNTL(x) popcountl(x)

/* Can be used to insert padding to the next L1 cache line boundary */
#define PAD_TO_NEXT_CACHE_LN(used) char padding[L1_CACHE_LINE_SIZE - ((used) % L1_CACHE_LINE_SIZE)]
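
/* Example use inside a struct (names purely illustrative):
 *     struct two_words {
 *         unsigned long a;
 *         unsigned long b;
 *         PAD_TO_NEXT_CACHE_LN(2 * sizeof(unsigned long));
 *     };
 * Note that a full cache line of padding is added when 'used' is already a
 * multiple of L1_CACHE_LINE_SIZE.
 */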

#endif /* !__ASSEMBLER__ */