1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3 * Copyright (c) 2022, Linaro Limited
4 */
5 #ifndef __FAULT_MITIGATION_H
6 #define __FAULT_MITIGATION_H
7
8 #include <assert.h>
9 #include <config.h>
10 #include <string.h>
11 #include <util.h>
12
13 #ifdef __KERNEL__
14 #include <kernel/panic.h>
15 #include <kernel/thread.h>
16 #else
17 #include <tee_api.h>
18 #endif
19
20 /*
 * Fault mitigation helpers to make successful Hardware Fault Attacks
22 * harder to achieve. The paper [1] by Riscure gives background to the
23 * problem.
24 *
25 * These helpers aim to make it hard for a single glitch attack to succeed
26 * while the protected function or one of the ftmn_*() functions are
27 * executed.
28 *
29 * To have something to work with we assume that a single glitch may affect
30 * a few instructions in sequence to do nothing or to corrupt the content
31 * of a few registers.
32 *
33 * Using the terminology from [1] we are implementing the following patterns:
34 * 3 FAULT.VALUE.CHECK
35 * 5 FAULT.DECISION.CHECK
36 * 9 FAULT.FLOW.CONTROL
37 *
 * Additionally, the following patterns are acknowledged with a few
 * comments:
40 * 1. FAULT.CONSTANT.CODING
41 * Zero is normally a success code in OP-TEE so special functions are
42 * added to record anything but a zero result.
43 * 8. FAULT.NESTED.CHECK
44 * The linked calls performed by for instance FTMN_CALL_FUNC() addresses
45 * this by relying on the called function to update a state in
46 * struct ftmn_func_arg which is checked when the function has returned.
47 * 11. FAULT.PENALTY
48 * This is implicit since we're normally trying to protect things post
49 * boot and booting takes quite some time.
50 *
51 * [1] https://web.archive.org/web/20220616035354/https://www.riscure.com/uploads/2020/05/Riscure_Whitepaper_Fault_Mitigation_Patterns_final.pdf
52 */
53
54 #include <stdint.h>
55 #include <stdbool.h>
56
/*
 * struct ftmn_check - track current checked state
 * @steps: accumulated checkpoints
 * @res: last stored result or return value
 *
 * While a function is executed it can update its state as a way of keeping
 * track of important passages inside the function. When the function
 * returns with for instance ftmn_return_res() it is checked that the
 * accumulated state matches the expected state.
 *
 * @res is xored with FTMN_DEFAULT_HASH in order to retrieve the saved
 * result or return value.
 */
struct ftmn_check {
	unsigned long steps;	/* Accumulated checkpoint steps */
	unsigned long res;	/* Result xored with FTMN_DEFAULT_HASH */
};
74
/*
 * struct ftmn_func_arg - track a called function
 * @hash: xor bitmask
 * @res: stored result xored with @hash
 *
 * When the call of a function is tracked @hash is initialized to hash of
 * caller xored with hash of called function. Before the called function
 * updates @res it first xors @hash with its own hash, which is supposed to
 * restore @hash to the hash of the calling function. This allows the
 * calling function to confirm that the correct function has been called.
 */
struct ftmn_func_arg {
	unsigned long hash;	/* Caller hash, or caller ^ callee hash */
	unsigned long res;	/* Result xored with @hash */
};
90
/*
 * struct ftmn - link a tracked call chain
 * @check: local checked state
 * @arg: argument for the next called tracked function
 * @saved_arg: pointer to an optional argument passed to this function
 * @arg_pp: cached return value from __ftmn_get_tsd_func_arg_pp()
 * @my_hash: the hash of the calling function
 * @called_hash: the hash of the called function
 *
 * In order to maintain the linked call chain of tracked functions the
 * struct ftmn_func_arg passed to this function is saved in @saved_arg
 * before updating the argument pointer with @arg.
 */
struct ftmn {
	struct ftmn_check check;
	struct ftmn_func_arg arg;
	struct ftmn_func_arg *saved_arg;
	struct ftmn_func_arg **arg_pp;
	unsigned long my_hash;
	unsigned long called_hash;
};
112
/*
 * enum ftmn_incr - increase counter values
 *
 * Prime numbers to be used when increasing the accumulated state.
 * Different increase counters can be used to keep apart different
 * checkpoints. Using distinct primes makes it unlikely that a glitched
 * sequence of increments sums up to the expected total by accident.
 */
enum ftmn_incr {
	FTMN_INCR0 = 7873,
	FTMN_INCR1 = 7877,
	FTMN_INCR2 = 7879,
	FTMN_INCR3 = 7883,
	FTMN_INCR4 = 7901,
	FTMN_INCR5 = 7907,
	FTMN_INCR_RESERVED = 7919,
};
129
130 typedef int (*ftmn_memcmp_t)(const void *p1, const void *p2, size_t nb);
131
/* The default hash used when xoring the result in struct ftmn_check */
#ifdef __ILP32__
/* 32-bit unsigned long */
#define FTMN_DEFAULT_HASH	0x9c478bf6UL
#else
/* 64-bit unsigned long */
#define FTMN_DEFAULT_HASH	0xc478bf63e9500cb5UL
#endif
138
/*
 * FTMN_PANIC() - FTMN specific panic function
 *
 * This function is called whenever the FTMN function detects an
 * inconsistency. An inconsistency is able to occur if the system is
 * subject to a fault injection attack, in this case doing a panic() isn't
 * an extreme measure.
 *
 * Note: no trailing semicolon in the expansion (CERT PRE11-C), the caller
 * supplies it. A semicolon inside the macro would break constructs such as
 * "if (x) FTMN_PANIC(); else ...".
 */
#ifdef __KERNEL__
#define FTMN_PANIC()	panic()
#else
#define FTMN_PANIC()	TEE_Panic(0)
#endif
152
/* Only the first 256 characters of a function name contribute to the hash */
#define __FTMN_MAX_FUNC_NAME_LEN	256

/* Byte at offset @o in string @f, or 0 when @o is at or past length @l */
#define __FTMN_FUNC_BYTE(f, o, l) ((o) < (l) ? (uint8_t)(f)[(o)] : 0)

#ifdef __ILP32__
/* Assemble a 32-bit little-endian word from 4 bytes of the name */
#define __FTMN_GET_FUNC_U32(f, o, l) \
	(SHIFT_U32(__FTMN_FUNC_BYTE((f), (o), (l)), 0) | \
	 SHIFT_U32(__FTMN_FUNC_BYTE((f), (o) + 1, (l)), 8) | \
	 SHIFT_U32(__FTMN_FUNC_BYTE((f), (o) + 2, (l)), 16) | \
	 SHIFT_U32(__FTMN_FUNC_BYTE((f), (o) + 3, (l)), 24))

/* Xor two adjacent 32-bit words, covering 8 bytes of the name */
#define __FTMN_FUNC_HASH64(f, o, l) \
	(__FTMN_GET_FUNC_U32((f), (o), (l)) ^ \
	 __FTMN_GET_FUNC_U32((f), (o) + 4, (l)))

#define __FTMN_FUNC_HASH32(f, o, l) \
	(__FTMN_FUNC_HASH64((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH64((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 16, (l)))
#else
/* Assemble a 64-bit little-endian word from 8 bytes of the name */
#define __FTMN_GET_FUNC_U64(f, o, l) \
	(SHIFT_U64(__FTMN_FUNC_BYTE((f), (o), (l)), 0) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 1, (l)), 8) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 2, (l)), 16) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 3, (l)), 24) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 4, (l)), 32) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 5, (l)), 40) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 6, (l)), 48) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 7, (l)), 56))

#define __FTMN_FUNC_HASH32(f, o, l) \
	(__FTMN_GET_FUNC_U64((f), (o), (l)) ^ \
	 __FTMN_GET_FUNC_U64((f), (o) + 8, (l)))
#endif

/* Each level below folds two halves of the remaining name range together */
#define __FTMN_FUNC_HASH16(f, o, l) \
	(__FTMN_FUNC_HASH32((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH32((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 16, (l)))

#define __FTMN_FUNC_HASH8(f, o, l) \
	(__FTMN_FUNC_HASH16((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH16((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 8, (l)))

#define __FTMN_FUNC_HASH4(f, o, l) \
	(__FTMN_FUNC_HASH8((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH8((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 4, (l)))

#define __FTMN_FUNC_HASH2(f, l) \
	(__FTMN_FUNC_HASH4(f, 0, l) ^ \
	 __FTMN_FUNC_HASH4(f, __FTMN_MAX_FUNC_NAME_LEN / 2, l))

#define __FTMN_FUNC_HASH(f, l) (unsigned long)__FTMN_FUNC_HASH2((f), (l))
204
/*
 * Compute an expected accumulated step count from the number of times
 * each FTMN_INCRx counter was added, e.g. __ftmn_step_count_2(c0, c1) is
 * c0 * FTMN_INCR0 + c1 * FTMN_INCR1. __ftmn_args_count() counts the
 * variadic arguments (up to 6) to select the right variant; used via
 * FTMN_STEP_COUNT() below.
 */
#define __ftmn_step_count_1(c0)	((c0) * FTMN_INCR0)
#define __ftmn_step_count_2(c0, c1) \
	(__ftmn_step_count_1(c0) + (c1) * FTMN_INCR1)
#define __ftmn_step_count_3(c0, c1, c2) \
	(__ftmn_step_count_2(c0, c1) + (c2) * FTMN_INCR2)
#define __ftmn_step_count_4(c0, c1, c2, c3) \
	(__ftmn_step_count_3(c0, c1, c2) + (c3) * FTMN_INCR3)
#define __ftmn_step_count_5(c0, c1, c2, c3, c4) \
	(__ftmn_step_count_4(c0, c1, c2, c3) + (c4) * FTMN_INCR4)
#define __ftmn_step_count_6(c0, c1, c2, c3, c4, c5) \
	(__ftmn_step_count_5(c0, c1, c2, c3, c4) + (c5) * FTMN_INCR5)
#define ___ftmn_args_count(_0, _1, _2, _3, _4, _5, x, ...) x
#define __ftmn_args_count(...) \
	___ftmn_args_count(__VA_ARGS__, 6, 5, 4, 3, 2, 1, 0)
#define ___ftmn_step_count(count, ...) __ftmn_step_count_ ## count(__VA_ARGS__)
#define __ftmn_step_count(count, ...) ___ftmn_step_count(count, __VA_ARGS__)
221
/*
 * Out-of-line backends for the inline wrappers and macros below. Not
 * meant to be called directly; use the FTMN_*() macros or ftmn_*()
 * functions instead.
 */
unsigned long ___ftmn_return_res(struct ftmn_check *check, unsigned long steps,
				 unsigned long res);
void ___ftmn_expect_state(struct ftmn_check *check, enum ftmn_incr incr,
			  unsigned long steps, unsigned long res);

void ___ftmn_callee_done(struct ftmn_func_arg *arg, unsigned long my_hash,
			 unsigned long res);
void ___ftmn_callee_done_not_zero(struct ftmn_func_arg *arg,
				  unsigned long my_hash,
				  unsigned long res);
void ___ftmn_callee_done_memcmp(struct ftmn_func_arg *arg,
				unsigned long my_hash, int res,
				ftmn_memcmp_t my_memcmp,
				const void *p1, const void *p2, size_t nb);
void ___ftmn_callee_done_check(struct ftmn_func_arg *arg, unsigned long my_hash,
			       struct ftmn_check *check, enum ftmn_incr incr,
			       unsigned long steps, unsigned long res);

void ___ftmn_callee_update_not_zero(struct ftmn_func_arg *arg,
				    unsigned long res);

void ___ftmn_set_check_res(struct ftmn_check *check, enum ftmn_incr incr,
			   unsigned long res);
void ___ftmn_set_check_res_not_zero(struct ftmn_check *check,
				    enum ftmn_incr incr,
				    unsigned long res);
void ___ftmn_set_check_res_memcmp(struct ftmn_check *check, enum ftmn_incr incr,
				  int res, ftmn_memcmp_t my_memcmp,
				  const void *p1, const void *p2, size_t nb);

void ___ftmn_copy_linked_call_res(struct ftmn_check *check, enum ftmn_incr incr,
				  struct ftmn_func_arg *arg, unsigned long res);


#ifndef __KERNEL__
/* User mode (TA) storage for the linked call argument pointer */
extern struct ftmn_func_arg *__ftmn_global_func_arg;
#endif
259
/*
 * Returns the location where the current struct ftmn_func_arg pointer is
 * kept, or NULL when fault mitigation is disabled.
 */
static inline struct ftmn_func_arg **__ftmn_get_tsd_func_arg_pp(void)
{
#if defined(CFG_FAULT_MITIGATION) && defined(__KERNEL__)
	/* Use thread-local storage when a thread is active, else per-core */
	if (thread_get_id_may_fail() >= 0)
		return &thread_get_tsd()->ftmn_arg;
	else
		return &thread_get_core_local()->ftmn_arg;
#elif defined(CFG_FAULT_MITIGATION)
	/* User mode: a single global slot */
	return &__ftmn_global_func_arg;
#else
	return NULL;
#endif
}
273
/*
 * Returns the current struct ftmn_func_arg pointer, or NULL when fault
 * mitigation is disabled.
 */
static inline struct ftmn_func_arg *__ftmn_get_tsd_func_arg(void)
{
	struct ftmn_func_arg **arg_pp = __ftmn_get_tsd_func_arg_pp();

	return arg_pp ? *arg_pp : NULL;
}
283
__ftmn_push_linked_call(struct ftmn * ftmn,unsigned long my_hash,unsigned long called_hash)284 static inline void __ftmn_push_linked_call(struct ftmn *ftmn,
285 unsigned long my_hash,
286 unsigned long called_hash)
287 {
288 struct ftmn_func_arg **arg_pp = __ftmn_get_tsd_func_arg_pp();
289
290 if (arg_pp) {
291 ftmn->arg_pp = arg_pp;
292 ftmn->my_hash = my_hash;
293 ftmn->called_hash = called_hash;
294 ftmn->saved_arg = *ftmn->arg_pp;
295 *ftmn->arg_pp = &ftmn->arg;
296 ftmn->arg.hash = my_hash;
297 }
298 }
299
__ftmn_pop_linked_call(struct ftmn * ftmn)300 static inline void __ftmn_pop_linked_call(struct ftmn *ftmn)
301 {
302 if (ftmn->arg_pp)
303 *ftmn->arg_pp = ftmn->saved_arg;
304 }
305
__ftmn_copy_linked_call_res(struct ftmn * f,enum ftmn_incr incr,unsigned long res)306 static inline void __ftmn_copy_linked_call_res(struct ftmn *f,
307 enum ftmn_incr incr,
308 unsigned long res)
309 {
310 if (f->arg_pp) {
311 assert(f->arg.hash == (f->my_hash ^ f->called_hash));
312 assert(&f->arg == *f->arg_pp);
313 assert((f->arg.hash ^ f->arg.res) == res);
314 ___ftmn_copy_linked_call_res(&f->check, incr, &f->arg, res);
315 }
316 }
317
__ftmn_calle_swap_hash(struct ftmn_func_arg * arg,unsigned long my_old_hash,unsigned long my_new_hash)318 static inline void __ftmn_calle_swap_hash(struct ftmn_func_arg *arg,
319 unsigned long my_old_hash,
320 unsigned long my_new_hash)
321 {
322 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
323 arg->hash ^= my_old_hash ^ my_new_hash;
324 }
325
__ftmn_callee_done(struct ftmn_func_arg * arg,unsigned long my_hash,unsigned long res)326 static inline void __ftmn_callee_done(struct ftmn_func_arg *arg,
327 unsigned long my_hash, unsigned long res)
328 {
329 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
330 ___ftmn_callee_done(arg, my_hash, res);
331 }
332
__ftmn_callee_done_not_zero(struct ftmn_func_arg * arg,unsigned long hash,unsigned long res)333 static inline void __ftmn_callee_done_not_zero(struct ftmn_func_arg *arg,
334 unsigned long hash,
335 unsigned long res)
336 {
337 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
338 ___ftmn_callee_done_not_zero(arg, hash, res);
339 }
340
341 static inline int
__ftmn_callee_done_memcmp(struct ftmn_func_arg * arg,unsigned long hash,ftmn_memcmp_t my_memcmp,const void * p1,const void * p2,size_t nb)342 __ftmn_callee_done_memcmp(struct ftmn_func_arg *arg, unsigned long hash,
343 ftmn_memcmp_t my_memcmp,
344 const void *p1, const void *p2, size_t nb)
345 {
346 int res = my_memcmp(p1, p2, nb);
347
348 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
349 ___ftmn_callee_done_memcmp(arg, hash, res, my_memcmp,
350 p1, p2, nb);
351
352 return res;
353 }
354
__ftmn_callee_done_check(struct ftmn * ftmn,unsigned long my_hash,enum ftmn_incr incr,unsigned long steps,unsigned long res)355 static inline void __ftmn_callee_done_check(struct ftmn *ftmn,
356 unsigned long my_hash,
357 enum ftmn_incr incr,
358 unsigned long steps,
359 unsigned long res)
360 {
361 if (IS_ENABLED(CFG_FAULT_MITIGATION))
362 ___ftmn_callee_done_check(__ftmn_get_tsd_func_arg(), my_hash,
363 &ftmn->check, incr, steps, res);
364 }
365
__ftmn_callee_update_not_zero(struct ftmn_func_arg * arg,unsigned long res)366 static inline void __ftmn_callee_update_not_zero(struct ftmn_func_arg *arg,
367 unsigned long res)
368 {
369 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
370 ___ftmn_callee_update_not_zero(arg, res);
371 }
372
__ftmn_set_check_res(struct ftmn * ftmn,enum ftmn_incr incr,unsigned long res)373 static inline void __ftmn_set_check_res(struct ftmn *ftmn, enum ftmn_incr incr,
374 unsigned long res)
375 {
376 if (IS_ENABLED(CFG_FAULT_MITIGATION))
377 ___ftmn_set_check_res(&ftmn->check, incr, res);
378 }
379
__ftmn_set_check_res_not_zero(struct ftmn * ftmn,enum ftmn_incr incr,unsigned long res)380 static inline void __ftmn_set_check_res_not_zero(struct ftmn *ftmn,
381 enum ftmn_incr incr,
382 unsigned long res)
383 {
384 if (IS_ENABLED(CFG_FAULT_MITIGATION))
385 ___ftmn_set_check_res_not_zero(&ftmn->check, incr, res);
386 }
387
388
389
/*
 * FTMN_FUNC_HASH() - "hash" a function name
 *
 * Function names are "hashed" into an unsigned long. The "hashing" is done
 * by xoring each 32/64 bit word of the function name producing a bit
 * pattern that should be mostly unique for each function. Only the first
 * 256 characters of the name are used when xoring as this is expected to
 * be optimized to be calculated when compiling the source code in order to
 * minimize the overhead.
 */
#define FTMN_FUNC_HASH(name) __FTMN_FUNC_HASH(name, sizeof(name))

/*
 * FTMN_PUSH_LINKED_CALL() - push call into a linked call chain
 * @ftmn: The local struct ftmn
 * @called_func_hash: The hash of the called function
 *
 * Inserts a call into a linked call chain or starts a new call chain if
 * the passed struct ftmn_func_arg pointer was NULL.
 *
 * Each FTMN_PUSH_LINKED_CALL() is supposed to be matched by a
 * FTMN_POP_LINKED_CALL().
 */
#define FTMN_PUSH_LINKED_CALL(ftmn, called_func_hash) \
	__ftmn_push_linked_call((ftmn), FTMN_FUNC_HASH(__func__), \
				(called_func_hash))
416
/*
 * FTMN_SET_CHECK_RES_FROM_CALL() - copy the result from a linked call
 * @ftmn: The struct ftmn used during the linked call
 * @incr: Value to increase the checked state with
 * @res: Returned result to be match against the saved/copied result
 *
 * This macro is called just after a checked linked function has returned.
 * The return value from the function is copied from the struct ftmn_func_arg
 * passed to the called function into the local checked state. The checked
 * state is increased with @incr. @res is checked against the saved result
 * of the called function.
 */
#define FTMN_SET_CHECK_RES_FROM_CALL(ftmn, incr, res) \
	__ftmn_copy_linked_call_res((ftmn), (incr), (res))

/*
 * FTMN_POP_LINKED_CALL() - remove a call from a linked call chain
 * @ftmn: The local struct ftmn
 *
 * Supposed to match a call to FTMN_PUSH_LINKED_CALL()
 */
#define FTMN_POP_LINKED_CALL(ftmn) __ftmn_pop_linked_call((ftmn))
439
/*
 * FTMN_CALL_FUNC() - Do a linked call to a function
 * @res: Variable to be assigned the result of the called function
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 * @func: Function to be called
 * @...: Arguments to pass to @func
 *
 * This macro can be used to make a linked call to another function, the
 * callee. This macro depends on the callee to always update the struct
 * ftmn_func_arg (part of struct ftmn) even when returning an error.
 *
 * Note that in the cases where the callee may skip updating the struct
 * ftmn_func_arg this macro cannot be used as
 * FTMN_SET_CHECK_RES_FROM_CALL() would cause a panic due to mismatching
 * return value and saved result.
 */
#define FTMN_CALL_FUNC(res, ftmn, incr, func, ...) \
	do { \
		FTMN_PUSH_LINKED_CALL((ftmn), FTMN_FUNC_HASH(#func)); \
		(res) = func(__VA_ARGS__); \
		FTMN_SET_CHECK_RES_FROM_CALL((ftmn), (incr), (res)); \
		FTMN_POP_LINKED_CALL((ftmn)); \
	} while (0)
464
/*
 * FTMN_CALLEE_DONE() - Record result of callee
 * @res: Result or return value
 *
 * The passed result will be stored in the struct ftmn_func_arg struct
 * supplied by the caller. This function must only be called once by the
 * callee.
 *
 * Note that this function is somewhat dangerous as any passed value will
 * be stored so if the value has been tampered with there is no additional
 * redundant checks to rely on.
 */
#define FTMN_CALLEE_DONE(res) \
	__ftmn_callee_done(__ftmn_get_tsd_func_arg(), \
			   FTMN_FUNC_HASH(__func__), (res))
/*
 * FTMN_CALLEE_DONE_NOT_ZERO() - Record non-zero result of callee
 * @res: Result or return value
 *
 * The passed result will be stored in the struct ftmn_func_arg struct
 * supplied by the caller. This function must only be called once by the
 * callee.
 *
 * Note that this function is somewhat dangerous as any passed value will
 * be stored so if the value has been tampered with there is no additional
 * redundant checks to rely on. However, there are extra checks against
 * unintentionally storing a zero which often is interpreted as a
 * successful return value.
 */
#define FTMN_CALLEE_DONE_NOT_ZERO(res) \
	__ftmn_callee_done_not_zero(__ftmn_get_tsd_func_arg(), \
				    FTMN_FUNC_HASH(__func__), (res))
497
/*
 * FTMN_CALLEE_DONE_CHECK() - Record result of callee with checked state
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 * @exp_steps: Expected recorded checkpoints
 * @res: Result or return value
 *
 * The passed result will be stored in the struct ftmn_func_arg struct
 * supplied by the caller. This function must only be called once by the
 * callee.
 *
 * @res is double checked against the value stored in local checked state.
 * @exp_steps is checked against the local checked state. The local
 * checked state is increased by @incr.
 */
#define FTMN_CALLEE_DONE_CHECK(ftmn, incr, exp_steps, res) \
	__ftmn_callee_done_check((ftmn), FTMN_FUNC_HASH(__func__), \
				 (incr), (exp_steps), (res))

/*
 * FTMN_CALLEE_DONE_MEMCMP() - Record result of memcmp() in a callee
 * @my_memcmp: Function pointer of custom memcmp()
 * @p1: Pointer to first buffer
 * @p2: Pointer to second buffer
 * @nb: Number of bytes
 *
 * The result from the mem compare is stored in the struct ftmn_func_arg
 * supplied by the caller. This function must only be called once by the
 * callee.
 */
#define FTMN_CALLEE_DONE_MEMCMP(my_memcmp, p1, p2, nb) \
	__ftmn_callee_done_memcmp(__ftmn_get_tsd_func_arg(), \
				  FTMN_FUNC_HASH(__func__), (my_memcmp), \
				  (p1), (p2), (nb))
531
/*
 * FTMN_CALLEE_UPDATE_NOT_ZERO() - Update the result of a callee with a
 *				   non-zero value
 * @res: Result or return value
 *
 * The passed result will be stored in the struct ftmn_func_arg struct
 * supplied by the caller. This function can be called any number of times
 * by the callee, provided that one of the FTMN_CALLEE_DONE_XXX() functions
 * has been called first.
 *
 * Note that this function is somewhat dangerous as any passed value will
 * be stored so if the value has been tampered with there is no additional
 * redundant checks to rely on. However, there are extra checks against
 * unintentionally storing a zero which often is interpreted as a
 * successful return value.
 */
#define FTMN_CALLEE_UPDATE_NOT_ZERO(res) \
	__ftmn_callee_update_not_zero(__ftmn_get_tsd_func_arg(), res)
550
/*
 * FTMN_CALLEE_SWAP_HASH() - Remove old hash and add new hash
 * @my_old_hash: The old hash to remove
 *
 * This macro replaces the old expected function hash with the hash of the
 * current function.
 *
 * If a function is called using an alias the caller uses the hash of the
 * alias not the real function name. This hash is recorded in the field
 * "hash" in struct ftmn_func_arg which can be found with
 * __ftmn_get_tsd_func_arg().
 *
 * The FTMN_CALLEE_* functions only work with the real function name so the
 * old hash must be removed and replaced with the new for the calling
 * function to be able to verify the result.
 */
#define FTMN_CALLEE_SWAP_HASH(my_old_hash) \
	__ftmn_calle_swap_hash(__ftmn_get_tsd_func_arg(), \
			       (my_old_hash), FTMN_FUNC_HASH(__func__))
570
/*
 * FTMN_SET_CHECK_RES() - Records a result in local checked state
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 * @res: Result or return value
 *
 * Note that this function is somewhat dangerous as any passed value will
 * be stored so if the value has been tampered with there is no additional
 * redundant checks to rely on.
 */
#define FTMN_SET_CHECK_RES(ftmn, incr, res) \
	__ftmn_set_check_res((ftmn), (incr), (res))

/*
 * FTMN_SET_CHECK_RES_NOT_ZERO() - Records a non-zero result in local checked
 *				   state
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 * @res: Result or return value
 *
 * Note that this function is somewhat dangerous as any passed value will
 * be stored so if the value has been tampered with there is no additional
 * redundant checks to rely on. However, there are extra checks against
 * unintentionally storing a zero which often is interpreted as a
 * successful return value.
 */
#define FTMN_SET_CHECK_RES_NOT_ZERO(ftmn, incr, res) \
	__ftmn_set_check_res_not_zero((ftmn), (incr), (res))
599
/*
 * ftmn_set_check_res_memcmp() - Records a memcmp() result in local
 * checked state
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 * @my_memcmp: Function pointer of custom memcmp()
 * @p1: Pointer to first buffer
 * @p2: Pointer to second buffer
 * @nb: Number of bytes
 *
 * Performs the compare with @my_memcmp and, when fault mitigation is
 * enabled, records the outcome redundantly in the local checked state.
 * Returns the compare result.
 */
static inline int ftmn_set_check_res_memcmp(struct ftmn *ftmn,
					    enum ftmn_incr incr,
					    ftmn_memcmp_t my_memcmp,
					    const void *p1, const void *p2,
					    size_t nb)
{
	int cmp_res = my_memcmp(p1, p2, nb);

	if (IS_ENABLED(CFG_FAULT_MITIGATION))
		___ftmn_set_check_res_memcmp(&ftmn->check, incr, cmp_res,
					     my_memcmp, p1, p2, nb);

	return cmp_res;
}
614
/*
 * FTMN_STEP_COUNT() - Calculate total step count
 *
 * Takes variable number of arguments, up to a total of 6. Where arg0
 * is the number of times the counter has been increased by FTMN_INCR0,
 * arg1 FTMN_INCR1 and so on. Typically used to compute the expected
 * @steps value passed to ftmn_expect_state() or ftmn_return_res().
 */
#define FTMN_STEP_COUNT(...) \
	__ftmn_step_count(__ftmn_args_count(__VA_ARGS__), __VA_ARGS__)
624
/*
 * ftmn_checkpoint() - Add a checkpoint
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 *
 * Adds a checkpoint by increasing the internal checked state. This
 * can be checked at a later point in the calling function, for instance
 * with ftmn_return_res().
 */
static inline void ftmn_checkpoint(struct ftmn *ftmn, enum ftmn_incr incr)
{
	if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
		/*
		 * The purpose of the barriers is to prevent the compiler
		 * from optimizing this increase to some other location
		 * in the calling function. The increase must happen here,
		 * between the surrounding statements, for the checkpoint
		 * to reflect actual control flow.
		 */
		barrier();
		ftmn->check.steps += incr;
		barrier();
	}
}
647
/*
 * ftmn_expect_state() - Check expected state
 * @ftmn: The local struct ftmn
 * @incr: Value to increase the checked state with
 * @steps: Expected accumulated steps
 * @res: Expected saved result or return value
 *
 * This is a more advanced version of ftmn_checkpoint() which before
 * increasing the accumulated steps first checks the accumulated steps and
 * saved result or return value.
 */
static inline void ftmn_expect_state(struct ftmn *ftmn,
				     enum ftmn_incr incr, unsigned long steps,
				     unsigned long res)
{
	if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
		/* Debug-build checks; the out-of-line call below is the
		 * hardened check that remains in release builds. */
		assert((ftmn->check.res ^ FTMN_DEFAULT_HASH) == res);
		assert(ftmn->check.steps == steps);

		___ftmn_expect_state(&ftmn->check, incr, steps, res);
	}
}
670
/*
 * ftmn_return_res() - Check and return result
 * @ftmn: The local struct ftmn
 * @steps: Expected accumulated steps
 * @res: Expected saved result or return value
 *
 * Checks that the internal accumulated state matches the supplied @steps
 * and that the saved result or return value matches the supplied one.
 *
 * Returns @res.
 */
static inline unsigned long ftmn_return_res(struct ftmn *ftmn,
					    unsigned long steps,
					    unsigned long res)
{
	/*
	 * We're expecting that the compiler does a tail call optimization
	 * allowing ___ftmn_return_res() to have full control over the
	 * returned value. Thus trying to reduce the window where the
	 * return value can be tampered with.
	 */
	if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
		/* Debug-build checks; ___ftmn_return_res() is the
		 * hardened check that remains in release builds. */
		assert((ftmn->check.res ^ FTMN_DEFAULT_HASH) == res);
		assert(ftmn->check.steps == steps);

		return ___ftmn_return_res(&ftmn->check, steps, res);
	}
	return res;
}
700 #endif /*__FAULT_MITIGATION_H*/
701