/* LibTomCrypt, modular cryptographic library -- Tom St Denis */
/* SPDX-License-Identifier: Unlicense */

/* ---- HELPER MACROS ---- */
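/* The STOREnn{L,H}/LOADnn{L,H} macros pack 32- and 64-bit words to and from
 * byte arrays: the "L" variants use little-endian byte order (least
 * significant byte at index 0), the "H" variants big-endian byte order
 * (most significant byte at index 0), independent of the host's endianness. */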
#ifdef ENDIAN_NEUTRAL

#define STORE32L(x, y)                                                                     \
  do { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255);   \
       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)

#define LOAD32L(x, y)                            \
  do { x = ((ulong32)((y)[3] & 255)<<24) |       \
           ((ulong32)((y)[2] & 255)<<16) |       \
           ((ulong32)((y)[1] & 255)<<8)  |       \
           ((ulong32)((y)[0] & 255)); } while(0)

#define STORE64L(x, y)                                                                     \
  do { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255);   \
       (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255);   \
       (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255);   \
       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)

#define LOAD64L(x, y)                                                        \
  do { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)|  \
           (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32)|  \
           (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16)|  \
           (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); } while(0)

#define STORE32H(x, y)                                                                     \
  do { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255);   \
       (y)[2] = (unsigned char)(((x)>>8)&255); (y)[3] = (unsigned char)((x)&255); } while(0)

#define LOAD32H(x, y)                            \
  do { x = ((ulong32)((y)[0] & 255)<<24) |       \
           ((ulong32)((y)[1] & 255)<<16) |       \
           ((ulong32)((y)[2] & 255)<<8)  |       \
           ((ulong32)((y)[3] & 255)); } while(0)

#define STORE64H(x, y)                                                                     \
  do { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255);   \
       (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255);   \
       (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255);   \
       (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); } while(0)

#define LOAD64H(x, y)                                                        \
  do { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
           (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32) | \
           (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16) | \
           (((ulong64)((y)[6] & 255))<<8)|(((ulong64)((y)[7] & 255))); } while(0)


#elif defined(ENDIAN_LITTLE)

#ifdef LTC_HAVE_BSWAP_BUILTIN

#define STORE32H(x, y)                          \
  do { ulong32 ttt = __builtin_bswap32 ((x));   \
       XMEMCPY ((y), &ttt, 4); } while(0)

#define LOAD32H(x, y)                           \
  do { XMEMCPY (&(x), (y), 4);                  \
       (x) = __builtin_bswap32 ((x)); } while(0)

#elif !defined(LTC_NO_BSWAP) && (defined(INTEL_CC) || (defined(__GNUC__) && (defined(__DJGPP__) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__i386__) || defined(__x86_64__))))

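/* On x86/x86_64 without the bswap builtins, the byte order is flipped with
 * the BSWAP instruction: the store swaps the word in a register, writes it
 * out, then swaps again so the value left in x is unchanged; the load reads
 * the word and swaps it once. */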
#define STORE32H(x, y)            \
  asm __volatile__ (              \
     "bswapl %0 \n\t"             \
     "movl %0,(%1)\n\t"           \
     "bswapl %0 \n\t"             \
     ::"r"(x), "r"(y): "memory");

#define LOAD32H(x, y)             \
  asm __volatile__ (              \
     "movl (%1),%0\n\t"           \
     "bswapl %0\n\t"              \
     :"=r"(x): "r"(y): "memory");

#else

#define STORE32H(x, y)                                                                     \
  do { (y)[0] = (unsigned char)(((x)>>24)&255); (y)[1] = (unsigned char)(((x)>>16)&255);   \
       (y)[2] = (unsigned char)(((x)>>8)&255); (y)[3] = (unsigned char)((x)&255); } while(0)

#define LOAD32H(x, y)                            \
  do { x = ((ulong32)((y)[0] & 255)<<24) |       \
           ((ulong32)((y)[1] & 255)<<16) |       \
           ((ulong32)((y)[2] & 255)<<8)  |       \
           ((ulong32)((y)[3] & 255)); } while(0)

#endif

#ifdef LTC_HAVE_BSWAP_BUILTIN

#define STORE64H(x, y)                          \
  do { ulong64 ttt = __builtin_bswap64 ((x));   \
       XMEMCPY ((y), &ttt, 8); } while(0)

#define LOAD64H(x, y)                           \
  do { XMEMCPY (&(x), (y), 8);                  \
       (x) = __builtin_bswap64 ((x)); } while(0)

/* x86_64 processor */
#elif !defined(LTC_NO_BSWAP) && (defined(__GNUC__) && defined(__x86_64__))

#define STORE64H(x, y)            \
  asm __volatile__ (              \
     "bswapq %0 \n\t"             \
     "movq %0,(%1)\n\t"           \
     "bswapq %0 \n\t"             \
     ::"r"(x), "r"(y): "memory");

#define LOAD64H(x, y)             \
  asm __volatile__ (              \
     "movq (%1),%0\n\t"           \
     "bswapq %0\n\t"              \
     :"=r"(x): "r"(y): "memory");

#else

#define STORE64H(x, y)                                                                     \
  do { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255);   \
       (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255);   \
       (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255);   \
       (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); } while(0)

#define LOAD64H(x, y)                                                        \
  do { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48) | \
           (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32) | \
           (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16) | \
           (((ulong64)((y)[6] & 255))<<8)|(((ulong64)((y)[7] & 255))); } while(0)

#endif

#ifdef ENDIAN_32BITWORD

#define STORE32L(x, y)  \
  do { ulong32 ttt = (x); XMEMCPY(y, &ttt, 4); } while(0)

#define LOAD32L(x, y)   \
  do { XMEMCPY(&(x), y, 4); } while(0)

#define STORE64L(x, y)                                                                     \
  do { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255);   \
       (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255);   \
       (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255);   \
       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)

#define LOAD64L(x, y)                                                        \
  do { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48)|  \
           (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32)|  \
           (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16)|  \
           (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); } while(0)

#else /* 64-bit words then */

#define STORE32L(x, y)  \
  do { ulong32 ttt = (x); XMEMCPY(y, &ttt, 4); } while(0)

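/* ulong32 may be wider than 32 bits on a 64-bit-word build, and XMEMCPY only
 * fills the low four bytes, so the upper bits are cleared explicitly after
 * the copy. */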
#define LOAD32L(x, y)   \
  do { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; } while(0)

#define STORE64L(x, y)  \
  do { ulong64 ttt = (x); XMEMCPY(y, &ttt, 8); } while(0)

#define LOAD64L(x, y)   \
  do { XMEMCPY(&(x), y, 8); } while(0)

#endif /* ENDIAN_64BITWORD */

#elif defined(ENDIAN_BIG)

#define STORE32L(x, y)                                                                     \
  do { (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255);   \
       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)

#define LOAD32L(x, y)                            \
  do { x = ((ulong32)((y)[3] & 255)<<24) |       \
           ((ulong32)((y)[2] & 255)<<16) |       \
           ((ulong32)((y)[1] & 255)<<8)  |       \
           ((ulong32)((y)[0] & 255)); } while(0)

#define STORE64L(x, y)                                                                     \
  do { (y)[7] = (unsigned char)(((x)>>56)&255); (y)[6] = (unsigned char)(((x)>>48)&255);   \
       (y)[5] = (unsigned char)(((x)>>40)&255); (y)[4] = (unsigned char)(((x)>>32)&255);   \
       (y)[3] = (unsigned char)(((x)>>24)&255); (y)[2] = (unsigned char)(((x)>>16)&255);   \
       (y)[1] = (unsigned char)(((x)>>8)&255); (y)[0] = (unsigned char)((x)&255); } while(0)

#define LOAD64L(x, y)                                                        \
  do { x = (((ulong64)((y)[7] & 255))<<56)|(((ulong64)((y)[6] & 255))<<48) | \
           (((ulong64)((y)[5] & 255))<<40)|(((ulong64)((y)[4] & 255))<<32) | \
           (((ulong64)((y)[3] & 255))<<24)|(((ulong64)((y)[2] & 255))<<16) | \
           (((ulong64)((y)[1] & 255))<<8)|(((ulong64)((y)[0] & 255))); } while(0)

#ifdef ENDIAN_32BITWORD

#define STORE32H(x, y)  \
  do { ulong32 ttt = (x); XMEMCPY(y, &ttt, 4); } while(0)

#define LOAD32H(x, y)   \
  do { XMEMCPY(&(x), y, 4); } while(0)

#define STORE64H(x, y)                                                                     \
  do { (y)[0] = (unsigned char)(((x)>>56)&255); (y)[1] = (unsigned char)(((x)>>48)&255);   \
       (y)[2] = (unsigned char)(((x)>>40)&255); (y)[3] = (unsigned char)(((x)>>32)&255);   \
       (y)[4] = (unsigned char)(((x)>>24)&255); (y)[5] = (unsigned char)(((x)>>16)&255);   \
       (y)[6] = (unsigned char)(((x)>>8)&255); (y)[7] = (unsigned char)((x)&255); } while(0)

#define LOAD64H(x, y)                                                        \
  do { x = (((ulong64)((y)[0] & 255))<<56)|(((ulong64)((y)[1] & 255))<<48)|  \
           (((ulong64)((y)[2] & 255))<<40)|(((ulong64)((y)[3] & 255))<<32)|  \
           (((ulong64)((y)[4] & 255))<<24)|(((ulong64)((y)[5] & 255))<<16)|  \
           (((ulong64)((y)[6] & 255))<<8)| (((ulong64)((y)[7] & 255))); } while(0)

#else /* 64-bit words then */

#define STORE32H(x, y)  \
  do { ulong32 ttt = (x); XMEMCPY(y, &ttt, 4); } while(0)

#define LOAD32H(x, y)   \
  do { XMEMCPY(&(x), y, 4); x &= 0xFFFFFFFF; } while(0)

#define STORE64H(x, y)  \
  do { ulong64 ttt = (x); XMEMCPY(y, &ttt, 8); } while(0)

#define LOAD64H(x, y)   \
  do { XMEMCPY(&(x), y, 8); } while(0)

#endif /* ENDIAN_64BITWORD */
#endif /* ENDIAN_BIG */

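/* Illustrative use (not part of the original header): serializing a word
 * with the "H" macros always yields big-endian bytes, whatever the host is.
 *
 *    unsigned char buf[4];
 *    ulong32 in = 0x01020304UL, out;
 *    STORE32H(in, buf);   // buf[] == { 0x01, 0x02, 0x03, 0x04 }
 *    LOAD32H(out, buf);   // out == 0x01020304
 */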
#define BSWAP(x)  ( ((x>>24)&0x000000FFUL) | ((x<<24)&0xFF000000UL) | \
                    ((x>>8)&0x0000FF00UL)  | ((x<<8)&0x00FF0000UL) )
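/* BSWAP reverses the byte order of a 32-bit value, e.g.
 * BSWAP(0x11223344UL) == 0x44332211UL. */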

/* 32-bit Rotates */
#if defined(_MSC_VER)
#define LTC_ROx_BUILTIN

/* intrinsic rotate */
#include <stdlib.h>
#pragma intrinsic(_rotr,_rotl)
#define ROR(x,n) _rotr(x,n)
#define ROL(x,n) _rotl(x,n)
#define RORc(x,n) ROR(x,n)
#define ROLc(x,n) ROL(x,n)

#elif defined(LTC_HAVE_ROTATE_BUILTIN)
#define LTC_ROx_BUILTIN

#define ROR(x,n) __builtin_rotateright32(x,n)
#define ROL(x,n) __builtin_rotateleft32(x,n)
#define ROLc(x,n) ROL(x,n)
#define RORc(x,n) ROR(x,n)

#elif !defined(__STRICT_ANSI__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && !defined(INTEL_CC) && !defined(LTC_NO_ASM)
#define LTC_ROx_ASM

static inline ulong32 ROL(ulong32 word, int i)
{
   asm ("roll %%cl,%0"
        :"=r" (word)
        :"0" (word),"c" (i));
   return word;
}

static inline ulong32 ROR(ulong32 word, int i)
{
   asm ("rorl %%cl,%0"
        :"=r" (word)
        :"0" (word),"c" (i));
   return word;
}

#ifndef LTC_NO_ROLC

#define ROLc(word,i) ({ \
   ulong32 ROLc_tmp = (word); \
   __asm__ ("roll %2, %0" : \
            "=r" (ROLc_tmp) : \
            "0" (ROLc_tmp), \
            "I" (i)); \
   ROLc_tmp; \
   })
#define RORc(word,i) ({ \
   ulong32 RORc_tmp = (word); \
   __asm__ ("rorl %2, %0" : \
            "=r" (RORc_tmp) : \
            "0" (RORc_tmp), \
            "I" (i)); \
   RORc_tmp; \
   })

#else

#define ROLc ROL
#define RORc ROR

#endif

#elif !defined(__STRICT_ANSI__) && defined(LTC_PPC32)
#define LTC_ROx_ASM

static inline ulong32 ROL(ulong32 word, int i)
{
   asm ("rotlw %0,%0,%2"
        :"=r" (word)
        :"0" (word),"r" (i));
   return word;
}

static inline ulong32 ROR(ulong32 word, int i)
{
   asm ("rotlw %0,%0,%2"
        :"=r" (word)
        :"0" (word),"r" (32-i));
   return word;
}

#ifndef LTC_NO_ROLC

static inline ulong32 ROLc(ulong32 word, const int i)
{
   asm ("rotlwi %0,%0,%2"
        :"=r" (word)
        :"0" (word),"I" (i));
   return word;
}

static inline ulong32 RORc(ulong32 word, const int i)
{
   asm ("rotrwi %0,%0,%2"
        :"=r" (word)
        :"0" (word),"I" (i));
   return word;
}

#else

#define ROLc ROL
#define RORc ROR

#endif


#else

/* rotates the hard way */
#define ROL(x, y) ( (((ulong32)(x)<<(ulong32)((y)&31)) | (((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
#define ROR(x, y) ( ((((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((y)&31)) | ((ulong32)(x)<<(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
#define ROLc(x, y) ( (((ulong32)(x)<<(ulong32)((y)&31)) | (((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)
#define RORc(x, y) ( ((((ulong32)(x)&0xFFFFFFFFUL)>>(ulong32)((y)&31)) | ((ulong32)(x)<<(ulong32)((32-((y)&31))&31))) & 0xFFFFFFFFUL)

#endif
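/* Note on usage: ROL/ROR take a rotate count that may be computed at run
 * time, while ROLc/RORc are intended for counts known at compile time (the
 * inline-asm backends encode the count as an immediate operand).  For
 * example:
 *
 *    ulong32 v = 0x80000001UL;
 *    ulong32 a = ROLc(v, 1);   // 0x00000003UL
 *    ulong32 b = ROR(v, n);    // n may be a runtime value in 0..31
 */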


/* 64-bit Rotates */
#if defined(_MSC_VER)

/* intrinsic rotate */
#include <stdlib.h>
#pragma intrinsic(_rotr64,_rotl64)
#define ROR64(x,n) _rotr64(x,n)
#define ROL64(x,n) _rotl64(x,n)
#define ROR64c(x,n) ROR64(x,n)
#define ROL64c(x,n) ROL64(x,n)

#elif defined(LTC_HAVE_ROTATE_BUILTIN)

#define ROR64(x,n) __builtin_rotateright64(x,n)
#define ROL64(x,n) __builtin_rotateleft64(x,n)
#define ROR64c(x,n) ROR64(x,n)
#define ROL64c(x,n) ROL64(x,n)

#elif !defined(__STRICT_ANSI__) && defined(__GNUC__) && defined(__x86_64__) && !defined(INTEL_CC) && !defined(LTC_NO_ASM)

static inline ulong64 ROL64(ulong64 word, int i)
{
   asm("rolq %%cl,%0"
       :"=r" (word)
       :"0" (word),"c" (i));
   return word;
}

static inline ulong64 ROR64(ulong64 word, int i)
{
   asm("rorq %%cl,%0"
       :"=r" (word)
       :"0" (word),"c" (i));
   return word;
}

#ifndef LTC_NO_ROLC

#define ROL64c(word,i) ({ \
   ulong64 ROL64c_tmp = word; \
   __asm__ ("rolq %2, %0" : \
            "=r" (ROL64c_tmp) : \
            "0" (ROL64c_tmp), \
            "J" (i)); \
   ROL64c_tmp; \
   })
#define ROR64c(word,i) ({ \
   ulong64 ROR64c_tmp = word; \
   __asm__ ("rorq %2, %0" : \
            "=r" (ROR64c_tmp) : \
            "0" (ROR64c_tmp), \
            "J" (i)); \
   ROR64c_tmp; \
   })

#else /* LTC_NO_ROLC */

#define ROL64c ROL64
#define ROR64c ROR64

#endif

#else /* Not x86_64 */

#define ROL64(x, y) \
    ( (((x)<<((ulong64)(y)&63)) | \
      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))

#define ROR64(x, y) \
    ( ((((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)(y)&CONST64(63))) | \
      ((x)<<(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))

#define ROL64c(x, y) \
    ( (((x)<<((ulong64)(y)&63)) | \
      (((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))

#define ROR64c(x, y) \
    ( ((((x)&CONST64(0xFFFFFFFFFFFFFFFF))>>((ulong64)(y)&CONST64(63))) | \
      ((x)<<(((ulong64)64-((y)&63))&63))) & CONST64(0xFFFFFFFFFFFFFFFF))

#endif
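/* The portable 64-bit rotates reduce the count to 0..63 and mask the result
 * with CONST64(0xFFFFFFFFFFFFFFFF) so they stay correct even if ulong64 is a
 * type wider than 64 bits.  Illustrative example:
 *
 *    ulong64 t = ROL64c(CONST64(1), 63);   // t == 0x8000000000000000
 */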

#ifndef MAX
#define MAX(x, y) ( ((x)>(y))?(x):(y) )
#endif

#ifndef MIN
#define MIN(x, y) ( ((x)<(y))?(x):(y) )
#endif

#ifndef LTC_UNUSED_PARAM
#define LTC_UNUSED_PARAM(x) (void)(x)
#endif

/* there is no snprintf before Visual C++ 2015 */
#if defined(_MSC_VER) && _MSC_VER < 1900
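/* note: unlike C99 snprintf, _snprintf does not guarantee NUL termination
 * when the output is truncated, so callers should terminate the buffer
 * themselves if truncation is possible */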
#define snprintf _snprintf
#endif