1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3 * Copyright (c) 2022, Linaro Limited
4 */
5 #ifndef __FAULT_MITIGATION_H
6 #define __FAULT_MITIGATION_H
7
8 #include <assert.h>
9 #include <config.h>
10 #include <string.h>
11 #include <util.h>
12
13 #ifdef __KERNEL__
14 #include <kernel/panic.h>
15 #include <kernel/thread.h>
16 #else
17 #include <tee_api.h>
18 #endif
19
20 /*
 * Fault mitigation helpers to make successful Hardware Fault Attacks
22 * harder to achieve. The paper [1] by Riscure gives background to the
23 * problem.
24 *
25 * These helpers aim to make it hard for a single glitch attack to succeed
26 * while the protected function or one of the ftmn_*() functions are
27 * executed.
28 *
29 * To have something to work with we assume that a single glitch may affect
30 * a few instructions in sequence to do nothing or to corrupt the content
31 * of a few registers.
32 *
33 * Using the terminology from [1] we are implementing the following patterns:
34 * 3 FAULT.VALUE.CHECK
35 * 5 FAULT.DECISION.CHECK
36 * 9 FAULT.FLOW.CONTROL
37 *
 * Additionally, the following patterns are also acknowledged with a few
39 * comments:
40 * 1. FAULT.CONSTANT.CODING
41 * Zero is normally a success code in OP-TEE so special functions are
42 * added to record anything but a zero result.
43 * 8. FAULT.NESTED.CHECK
44 * The linked calls performed by for instance FTMN_CALL_FUNC() addresses
45 * this by relying on the called function to update a state in
46 * struct ftmn_func_arg which is checked when the function has returned.
47 * 11. FAULT.PENALTY
48 * This is implicit since we're normally trying to protect things post
49 * boot and booting takes quite some time.
50 *
51 * [1] https://web.archive.org/web/20220616035354/https://www.riscure.com/uploads/2020/05/Riscure_Whitepaper_Fault_Mitigation_Patterns_final.pdf
52 */
53
54 #include <stdint.h>
55 #include <stdbool.h>
56
57 /*
58 * struct ftmn_check - track current checked state
59 * @steps: accumulated checkpoints
60 * @res: last stored result or return value
61 *
62 * While a function is executed it can update its state as a way of keeping
63 * track of important passages inside the function. When the function
64 * returns with for instance ftmn_return_res() it is checked that the
65 * accumulated state matches the expected state.
66 *
67 * @res is xored with FTMN_DEFAULT_HASH in order to retrieve the saved
68 * result or return value.
69 */
struct ftmn_check {
	unsigned long steps;	/* accumulated checkpoint increments */
	unsigned long res;	/* saved result, xored with FTMN_DEFAULT_HASH */
};
74
75 /*
76 * struct ftmn_func_arg - track a called function
77 * @hash: xor bitmask
78 * @res: stored result xored with @hash
79 *
80 * When the call of a function is tracked @hash is initialized to hash of
81 * caller xored with hash of called function. Before the called function
82 * updates @res it first xors @hash with its own hash, which is supposed to
83 * restore @hash to the hash of the calling function. This allows the
84 * calling function to confirm that the correct function has been called.
85 */
struct ftmn_func_arg {
	unsigned long hash;	/* xor bitmask, see comment above */
	unsigned long res;	/* stored result xored with @hash */
};
90
91 /*
92 * struct ftmn - link a tracked call chain
93 * @check: local checked state
94 * @arg: argument for the next called tracked function
95 * @saved_arg: pointer to an optional argument passed to this function
96 * @arg_pp: cached return value from __ftmn_get_tsd_func_arg_pp()
97 * @my_hash: the hash of the calling function
 * @called_hash: the hash of the called function
99 *
100 * In order to maintain the linked call chain of tracked functions the
101 * struct ftmn_func_arg passed to this function is saved in @saved_arg
102 * before updating the argument pointer with @arg.
103 */
struct ftmn {
	struct ftmn_check check;	 /* local checked state */
	struct ftmn_func_arg arg;	 /* argument for next tracked call */
	struct ftmn_func_arg *saved_arg; /* argument passed to this function */
	struct ftmn_func_arg **arg_pp;	 /* cached arg pointer location */
	unsigned long my_hash;		 /* hash of the calling function */
	unsigned long called_hash;	 /* hash of the called function */
};
112
113 /*
114 * enum ftmn_incr - increase counter values
115 *
116 * Prime numbers to be used when increasing the accumulated state.
117 * Different increase counters can be used to keep apart different
118 * checkpoints.
119 */
enum ftmn_incr {
	FTMN_INCR0 = 7873,	/* all values are primes, see comment above */
	FTMN_INCR1 = 7877,
	FTMN_INCR2 = 7879,
	FTMN_INCR3 = 7883,
	FTMN_INCR4 = 7901,
	FTMN_INCR5 = 7907,
	FTMN_INCR_RESERVED = 7919,
};
129
/* Signature of a memcmp() style compare function, see the *_memcmp() helpers */
typedef int (*ftmn_memcmp_t)(const void *p1, const void *p2, size_t nb);

/* The default hash used when xoring the result in struct ftmn_check */
#ifdef __ILP32__
#define FTMN_DEFAULT_HASH 0x9c478bf6UL	/* unsigned long is 32 bits */
#else
#define FTMN_DEFAULT_HASH 0xc478bf63e9500cb5UL	/* unsigned long is 64 bits */
#endif
138
139 /*
140 * FTMN_PANIC() - FTMN specific panic function
141 *
142 * This function is called whenever the FTMN function detects an
143 * inconsistency. An inconsistency is able to occur if the system is
144 * subject to a fault injection attack, in this case doing a panic() isn't
145 * an extreme measure.
146 */
#ifdef __KERNEL__
/*
 * NOTE(review): the expansion ends with ';', so take care when using
 * FTMN_PANIC() in an unbraced if/else statement.
 */
#define FTMN_PANIC() panic();
#else
#define FTMN_PANIC() TEE_Panic(0);
#endif
152
/* Only the first 256 characters of a function name are part of the hash */
#define __FTMN_MAX_FUNC_NAME_LEN 256

/* Byte at offset @o in the name @f, or 0 when @o is past the length @l */
#define __FTMN_FUNC_BYTE(f, o, l) ((o) < (l) ? (uint8_t)(f)[(o)] : 0)

/* Eight consecutive name bytes assembled into one 64-bit word */
#define __FTMN_GET_FUNC_U64(f, o, l) \
	(SHIFT_U64(__FTMN_FUNC_BYTE((f), (o), (l)), 0) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 1, (l)), 8) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 2, (l)), 16) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 3, (l)), 24) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 4, (l)), 32) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 5, (l)), 40) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 6, (l)), 48) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 7, (l)), 56))

/*
 * Each __FTMN_FUNC_HASH<n>() below xors two halves of its span together,
 * eventually folding the whole __FTMN_MAX_FUNC_NAME_LEN byte range into a
 * single 64-bit value in __FTMN_FUNC_HASH2().
 */
#define __FTMN_FUNC_HASH32(f, o, l) \
	(__FTMN_GET_FUNC_U64((f), (o), (l)) ^ \
	 __FTMN_GET_FUNC_U64((f), (o) + 8, (l)))

#define __FTMN_FUNC_HASH16(f, o, l) \
	(__FTMN_FUNC_HASH32((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH32((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 16, (l)))

#define __FTMN_FUNC_HASH8(f, o, l) \
	(__FTMN_FUNC_HASH16((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH16((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 8, (l)))

#define __FTMN_FUNC_HASH4(f, o, l) \
	(__FTMN_FUNC_HASH8((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH8((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 4, (l)))

#define __FTMN_FUNC_HASH2(f, l) \
	(__FTMN_FUNC_HASH4(f, 0, l) ^ \
	 __FTMN_FUNC_HASH4(f, __FTMN_MAX_FUNC_NAME_LEN / 2, l))

#ifdef __ILP32__
/* unsigned long is 32 bits, fold the 64-bit value into the lower half */
#define __FTMN_FUNC_HASH(f, l) \
	(unsigned long)(__FTMN_FUNC_HASH2((f), (l)) ^ \
			(__FTMN_FUNC_HASH2((f), (l)) >> 32))
#else
#define __FTMN_FUNC_HASH(f, l) (unsigned long)__FTMN_FUNC_HASH2((f), (l))
#endif
194
/*
 * __ftmn_step_count_<n>() computes the expected accumulated steps value
 * after @c0 increments of FTMN_INCR0, @c1 of FTMN_INCR1 and so on.
 */
#define __ftmn_step_count_1(c0) ((c0) * FTMN_INCR0)
#define __ftmn_step_count_2(c0, c1) \
	(__ftmn_step_count_1(c0) + (c1) * FTMN_INCR1)
#define __ftmn_step_count_3(c0, c1, c2) \
	(__ftmn_step_count_2(c0, c1) + (c2) * FTMN_INCR2)
#define __ftmn_step_count_4(c0, c1, c2, c3) \
	(__ftmn_step_count_3(c0, c1, c2) + (c3) * FTMN_INCR3)
#define __ftmn_step_count_5(c0, c1, c2, c3, c4) \
	(__ftmn_step_count_4(c0, c1, c2, c3) + (c4) * FTMN_INCR4)
#define __ftmn_step_count_6(c0, c1, c2, c3, c4, c5) \
	(__ftmn_step_count_5(c0, c1, c2, c3, c4) + (c5) * FTMN_INCR5)
/* Standard argument counting trick to select a macro by argument count */
#define ___ftmn_args_count(_0, _1, _2, _3, _4, _5, x, ...) x
#define __ftmn_args_count(...) \
	___ftmn_args_count(__VA_ARGS__, 6, 5, 4, 3, 2, 1, 0)
#define ___ftmn_step_count(count, ...) __ftmn_step_count_ ## count(__VA_ARGS__)
#define __ftmn_step_count(count, ...) ___ftmn_step_count(count, __VA_ARGS__)
211
/*
 * Out-of-line backends for the inline wrappers below. The triple
 * underscore prefix marks them as internal, not for direct use.
 */
unsigned long ___ftmn_return_res(struct ftmn_check *check, unsigned long steps,
				 unsigned long res);
void ___ftmn_expect_state(struct ftmn_check *check, enum ftmn_incr incr,
			  unsigned long steps, unsigned long res);

/* Record a result on behalf of a tracked callee, see FTMN_CALLEE_DONE*() */
void ___ftmn_callee_done(struct ftmn_func_arg *arg, unsigned long my_hash,
			 unsigned long res);
void ___ftmn_callee_done_not_zero(struct ftmn_func_arg *arg,
				  unsigned long my_hash,
				  unsigned long res);
void ___ftmn_callee_done_memcmp(struct ftmn_func_arg *arg,
				unsigned long my_hash, int res,
				ftmn_memcmp_t my_memcmp,
				const void *p1, const void *p2, size_t nb);
void ___ftmn_callee_done_check(struct ftmn_func_arg *arg, unsigned long my_hash,
			       struct ftmn_check *check, enum ftmn_incr incr,
			       unsigned long steps, unsigned long res);

void ___ftmn_callee_update_not_zero(struct ftmn_func_arg *arg,
				    unsigned long res);

/* Record a result in a local struct ftmn_check, see FTMN_SET_CHECK_RES*() */
void ___ftmn_set_check_res(struct ftmn_check *check, enum ftmn_incr incr,
			   unsigned long res);
void ___ftmn_set_check_res_not_zero(struct ftmn_check *check,
				    enum ftmn_incr incr,
				    unsigned long res);
void ___ftmn_set_check_res_memcmp(struct ftmn_check *check, enum ftmn_incr incr,
				  int res, ftmn_memcmp_t my_memcmp,
				  const void *p1, const void *p2, size_t nb);

void ___ftmn_copy_linked_call_res(struct ftmn_check *check, enum ftmn_incr incr,
				  struct ftmn_func_arg *arg, unsigned long res);


#ifndef __KERNEL__
/* In user mode the linked call chain is tracked in a global variable */
extern struct ftmn_func_arg *__ftmn_global_func_arg;
#endif
249
__ftmn_get_tsd_func_arg_pp(void)250 static inline struct ftmn_func_arg **__ftmn_get_tsd_func_arg_pp(void)
251 {
252 #if defined(CFG_FAULT_MITIGATION) && defined(__KERNEL__)
253 return &thread_get_tsd()->ftmn_arg;
254 #elif defined(CFG_FAULT_MITIGATION)
255 return &__ftmn_global_func_arg;
256 #else
257 return NULL;
258 #endif
259 }
260
__ftmn_get_tsd_func_arg(void)261 static inline struct ftmn_func_arg *__ftmn_get_tsd_func_arg(void)
262 {
263 struct ftmn_func_arg **pp = __ftmn_get_tsd_func_arg_pp();
264
265 if (!pp)
266 return NULL;
267
268 return *pp;
269 }
270
__ftmn_push_linked_call(struct ftmn * ftmn,unsigned long my_hash,unsigned long called_hash)271 static inline void __ftmn_push_linked_call(struct ftmn *ftmn,
272 unsigned long my_hash,
273 unsigned long called_hash)
274 {
275 struct ftmn_func_arg **arg_pp = __ftmn_get_tsd_func_arg_pp();
276
277 if (arg_pp) {
278 ftmn->arg_pp = arg_pp;
279 ftmn->my_hash = my_hash;
280 ftmn->called_hash = called_hash;
281 ftmn->saved_arg = *ftmn->arg_pp;
282 *ftmn->arg_pp = &ftmn->arg;
283 ftmn->arg.hash = my_hash;
284 }
285 }
286
__ftmn_pop_linked_call(struct ftmn * ftmn)287 static inline void __ftmn_pop_linked_call(struct ftmn *ftmn)
288 {
289 if (ftmn->arg_pp)
290 *ftmn->arg_pp = ftmn->saved_arg;
291 }
292
__ftmn_copy_linked_call_res(struct ftmn * f,enum ftmn_incr incr,unsigned long res)293 static inline void __ftmn_copy_linked_call_res(struct ftmn *f,
294 enum ftmn_incr incr,
295 unsigned long res)
296 {
297 if (f->arg_pp) {
298 assert(f->arg.hash == (f->my_hash ^ f->called_hash));
299 assert(&f->arg == *f->arg_pp);
300 assert((f->arg.hash ^ f->arg.res) == res);
301 ___ftmn_copy_linked_call_res(&f->check, incr, &f->arg, res);
302 }
303 }
304
__ftmn_calle_swap_hash(struct ftmn_func_arg * arg,unsigned long my_old_hash,unsigned long my_new_hash)305 static inline void __ftmn_calle_swap_hash(struct ftmn_func_arg *arg,
306 unsigned long my_old_hash,
307 unsigned long my_new_hash)
308 {
309 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
310 arg->hash ^= my_old_hash ^ my_new_hash;
311 }
312
__ftmn_callee_done(struct ftmn_func_arg * arg,unsigned long my_hash,unsigned long res)313 static inline void __ftmn_callee_done(struct ftmn_func_arg *arg,
314 unsigned long my_hash, unsigned long res)
315 {
316 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
317 ___ftmn_callee_done(arg, my_hash, res);
318 }
319
__ftmn_callee_done_not_zero(struct ftmn_func_arg * arg,unsigned long hash,unsigned long res)320 static inline void __ftmn_callee_done_not_zero(struct ftmn_func_arg *arg,
321 unsigned long hash,
322 unsigned long res)
323 {
324 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
325 ___ftmn_callee_done_not_zero(arg, hash, res);
326 }
327
328 static inline int
__ftmn_callee_done_memcmp(struct ftmn_func_arg * arg,unsigned long hash,ftmn_memcmp_t my_memcmp,const void * p1,const void * p2,size_t nb)329 __ftmn_callee_done_memcmp(struct ftmn_func_arg *arg, unsigned long hash,
330 ftmn_memcmp_t my_memcmp,
331 const void *p1, const void *p2, size_t nb)
332 {
333 int res = my_memcmp(p1, p2, nb);
334
335 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
336 ___ftmn_callee_done_memcmp(arg, hash, res, my_memcmp,
337 p1, p2, nb);
338
339 return res;
340 }
341
__ftmn_callee_done_check(struct ftmn * ftmn,unsigned long my_hash,enum ftmn_incr incr,unsigned long steps,unsigned long res)342 static inline void __ftmn_callee_done_check(struct ftmn *ftmn,
343 unsigned long my_hash,
344 enum ftmn_incr incr,
345 unsigned long steps,
346 unsigned long res)
347 {
348 if (IS_ENABLED(CFG_FAULT_MITIGATION))
349 ___ftmn_callee_done_check(__ftmn_get_tsd_func_arg(), my_hash,
350 &ftmn->check, incr, steps, res);
351 }
352
__ftmn_callee_update_not_zero(struct ftmn_func_arg * arg,unsigned long res)353 static inline void __ftmn_callee_update_not_zero(struct ftmn_func_arg *arg,
354 unsigned long res)
355 {
356 if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
357 ___ftmn_callee_update_not_zero(arg, res);
358 }
359
__ftmn_set_check_res(struct ftmn * ftmn,enum ftmn_incr incr,unsigned long res)360 static inline void __ftmn_set_check_res(struct ftmn *ftmn, enum ftmn_incr incr,
361 unsigned long res)
362 {
363 if (IS_ENABLED(CFG_FAULT_MITIGATION))
364 ___ftmn_set_check_res(&ftmn->check, incr, res);
365 }
366
__ftmn_set_check_res_not_zero(struct ftmn * ftmn,enum ftmn_incr incr,unsigned long res)367 static inline void __ftmn_set_check_res_not_zero(struct ftmn *ftmn,
368 enum ftmn_incr incr,
369 unsigned long res)
370 {
371 if (IS_ENABLED(CFG_FAULT_MITIGATION))
372 ___ftmn_set_check_res_not_zero(&ftmn->check, incr, res);
373 }
374
375
376
377 /*
378 * FTMN_FUNC_HASH() - "hash" a function name
379 *
380 * Function names are "hashed" into an unsigned long. The "hashing" is done
381 * by xoring each 32/64 bit word of the function name producing a bit
382 * pattern that should be mostly unique for each function. Only the first
383 * 256 characters of the name are used when xoring as this is expected to
384 * be optimized to be calculated when compiling the source code in order to
385 * minimize the overhead.
386 */
/* Note: sizeof(name) includes the terminating '\0' of a string literal */
#define FTMN_FUNC_HASH(name) __FTMN_FUNC_HASH(name, sizeof(name))
388
389 /*
390 * FTMN_PUSH_LINKED_CALL() - push call into a linked call chain
391 * @ftmn: The local struct ftmn
392 * @called_func_hash: The hash of the called function
393 *
394 * Inserts a call into a linked call chain or starts a new call chain if
395 * the passed struct ftmn_func_arg pointer was NULL.
396 *
397 * Each FTMN_PUSH_LINKED_CALL() is supposed to be matched by a
398 * FTMN_POP_LINKED_CALL().
399 */
400 #define FTMN_PUSH_LINKED_CALL(ftmn, called_func_hash) \
401 __ftmn_push_linked_call((ftmn), FTMN_FUNC_HASH(__func__), \
402 (called_func_hash))
403
404 /*
405 * FTMN_SET_CHECK_RES_FROM_CALL() - copy the result from a linked call
406 * @ftmn: The struct ftmn used during the linked call
407 * @incr: Value to increase the checked state with
408 * @res: Returned result to be match against the saved/copied result
409 *
410 * This macro is called just after a checked linked function has returned.
411 * The return value from the function is copied from the struct ftmn_func_arg
412 * passed to the called function into the local checked state. The checked
413 * state is increased with @incr. @res is checked against the saved result
414 * of the called function.
415 */
416 #define FTMN_SET_CHECK_RES_FROM_CALL(ftmn, incr, res) \
417 __ftmn_copy_linked_call_res((ftmn), (incr), (res))
418
419 /*
420 * FTMN_POP_LINKED_CALL() - remove a call from a linked call chain
421 * @ftmn: The local struct ftmn
422 *
423 * Supposed to match a call to FTMN_PUSH_LINKED_CALL()
424 */
425 #define FTMN_POP_LINKED_CALL(ftmn) __ftmn_pop_linked_call((ftmn))
426
427 /*
428 * FTMN_CALL_FUNC() - Do a linked call to a function
429 * @res: Variable to be assigned the result of the called function
430 * @ftmn: The local struct ftmn
431 * @incr: Value to increase the checked state with
432 * @func: Function to be called
433 * @...: Arguments to pass to @func
434 *
435 * This macro can be used to make a linked call to another function, the
436 * callee. This macro depends on the callee to always update the struct
437 * ftmn_func_arg (part of struct ftmn) even when returning an error.
438 *
439 * Note that in the cases where the callee may skip updating the struct
440 * ftmn_func_arg this macro cannot be used as
441 * FTMN_SET_CHECK_RES_FROM_CALL() would cause a panic due to mismatching
442 * return value and saved result.
443 */
444 #define FTMN_CALL_FUNC(res, ftmn, incr, func, ...) \
445 do { \
446 FTMN_PUSH_LINKED_CALL((ftmn), FTMN_FUNC_HASH(#func)); \
447 (res) = func(__VA_ARGS__); \
448 FTMN_SET_CHECK_RES_FROM_CALL((ftmn), (incr), (res)); \
449 FTMN_POP_LINKED_CALL((ftmn)); \
450 } while (0)
451
452 /*
453 * FTMN_CALLEE_DONE() - Record result of callee
454 * @res: Result or return value
455 *
456 * The passed result will be stored in the struct ftmn_func_arg struct
457 * supplied by the caller. This function must only be called once by the
458 * callee.
459 *
460 * Note that this function is somewhat dangerous as any passed value will
461 * be stored so if the value has been tampered with there is no additional
462 * redundant checks to rely on.
463 */
464 #define FTMN_CALLEE_DONE(res) \
465 __ftmn_callee_done(__ftmn_get_tsd_func_arg(), \
466 FTMN_FUNC_HASH(__func__), (res))
467 /*
468 * FTMN_CALLEE_DONE_NOT_ZERO() - Record non-zero result of callee
469 * @res: Result or return value
470 *
471 * The passed result will be stored in the struct ftmn_func_arg struct
472 * supplied by the caller. This function must only be called once by the
473 * callee.
474 *
475 * Note that this function is somewhat dangerous as any passed value will
476 * be stored so if the value has been tampered with there is no additional
477 * redundant checks to rely on. However, there are extra checks against
478 * unintentionally storing a zero which often is interpreted as a
479 * successful return value.
480 */
481 #define FTMN_CALLEE_DONE_NOT_ZERO(res) \
482 __ftmn_callee_done_not_zero(__ftmn_get_tsd_func_arg(), \
483 FTMN_FUNC_HASH(__func__), (res))
484
485 /*
486 * FTMN_CALLEE_DONE_CHECK() - Record result of callee with checked state
487 * @ftmn: The local struct ftmn
488 * @incr: Value to increase the checked state with
489 * @exp_steps: Expected recorded checkpoints
490 * @res: Result or return value
491 *
492 * The passed result will be stored in the struct ftmn_func_arg struct
493 * supplied by the caller. This function must only be called once by the
494 * callee.
495 *
496 * @res is double checked against the value stored in local checked state.
 * @exp_steps is checked against the local checked state. The local
498 * checked state is increased by @incr.
499 */
500 #define FTMN_CALLEE_DONE_CHECK(ftmn, incr, exp_steps, res) \
501 __ftmn_callee_done_check((ftmn), FTMN_FUNC_HASH(__func__), \
502 (incr), (exp_steps), (res))
503
504 /*
505 * FTMN_CALLEE_DONE_MEMCMP() - Record result of memcmp() in a callee
506 * @my_memcmp: Function pointer of custom memcmp()
507 * @p1: Pointer to first buffer
508 * @p2: Pointer to second buffer
509 * @nb: Number of bytes
510 *
511 * The result from the mem compare is saved in the local checked state.
512 * This function must only be called once by the callee.
513 */
514 #define FTMN_CALLEE_DONE_MEMCMP(my_memcmp, p1, p2, nb) \
515 __ftmn_callee_done_memcmp(__ftmn_get_tsd_func_arg(), \
516 FTMN_FUNC_HASH(__func__), (my_memcmp), \
517 (p1), (p2), (nb))
518
519 /*
520 * FTMN_CALLEE_UPDATE_NOT_ZERO() - Update the result of a callee with a
521 * non-zero value
522 * @res: Result or return value
523 *
524 * The passed result will be stored in the struct ftmn_func_arg struct
525 * supplied by the caller. This function can be called any number of times
526 * by the callee, provided that one of the FTMN_CALLEE_DONE_XXX() functions
527 * has been called first.
528 *
529 * Note that this function is somewhat dangerous as any passed value will
530 * be stored so if the value has been tampered with there is no additional
531 * redundant checks to rely on. However, there are extra checks against
532 * unintentionally storing a zero which often is interpreted as a
533 * successful return value.
534 */
535 #define FTMN_CALLEE_UPDATE_NOT_ZERO(res) \
536 __ftmn_callee_update_not_zero(__ftmn_get_tsd_func_arg(), res)
537
538 /*
539 * FTMN_CALLEE_SWAP_HASH() - Remove old hash and add new hash
540 * @my_old_hash: The old hash to remove
541 *
542 * This macro replaces the old expected function hash with the hash of the
543 * current function.
544 *
545 * If a function is called using an alias the caller uses the hash of the
 * alias not the real function name. This hash is recorded in the field
547 * "hash" in struct ftmn_func_arg which can be found with
548 * __ftmn_get_tsd_func_arg().
549 *
550 * The FTMN_CALLE_* functions only work with the real function name so the
551 * old hash must be removed and replaced with the new for the calling
552 * function to be able to verify the result.
553 */
554 #define FTMN_CALLEE_SWAP_HASH(my_old_hash) \
555 __ftmn_calle_swap_hash(__ftmn_get_tsd_func_arg(), \
556 (my_old_hash), FTMN_FUNC_HASH(__func__))
557
558 /*
559 * FTMN_SET_CHECK_RES() - Records a result in local checked state
560 * @ftmn: The local struct ftmn
561 * @incr: Value to increase the checked state with
562 * @res: Result or return value
563 *
564 * Note that this function is somewhat dangerous as any passed value will
565 * be stored so if the value has been tampered with there is no additional
566 * redundant checks to rely on.
567 */
568 #define FTMN_SET_CHECK_RES(ftmn, incr, res) \
569 __ftmn_set_check_res((ftmn), (incr), (res))
570
571 /*
572 * FTMN_SET_CHECK_RES_NOT_ZERO() - Records a non-zero result in local checked
573 * state
574 * @ftmn: The local struct ftmn
575 * @incr: Value to increase the checked state with
576 * @res: Result or return value
577 *
578 * Note that this function is somewhat dangerous as any passed value will
579 * be stored so if the value has been tampered with there is no additional
580 * redundant checks to rely on. However, there are extra checks against
581 * unintentionally storing a zero which often is interpreted as a
582 * successful return value.
583 */
584 #define FTMN_SET_CHECK_RES_NOT_ZERO(ftmn, incr, res) \
585 __ftmn_set_check_res_not_zero((ftmn), (incr), (res))
586
/*
 * Compare two buffers with @my_memcmp, record a redundant copy of the
 * result in the local checked state and return the plain compare result.
 */
static inline int ftmn_set_check_res_memcmp(struct ftmn *ftmn,
					    enum ftmn_incr incr,
					    ftmn_memcmp_t my_memcmp,
					    const void *p1, const void *p2,
					    size_t nb)
{
	int cmp = my_memcmp(p1, p2, nb);

	if (IS_ENABLED(CFG_FAULT_MITIGATION))
		___ftmn_set_check_res_memcmp(&ftmn->check, incr, cmp,
					     my_memcmp, p1, p2, nb);

	return cmp;
}
601
602 /*
603 * FTMN_STEP_COUNT() - Calculate total step count
604 *
605 * Takes variable number of arguments, up to a total of 6. Where arg0
606 * is the number of times the counter has been increased by FTMN_INCR0,
607 * arg1 FTMN_INCR1 and so on.
608 */
609 #define FTMN_STEP_COUNT(...) \
610 __ftmn_step_count(__ftmn_args_count(__VA_ARGS__), __VA_ARGS__)
611
612 /*
613 * ftmn_checkpoint() - Add a checkpoint
614 * @ftmn: The local struct ftmn
615 * @incr: Value to increase the checked state with
616 *
617 * Adds a checkpoint by increasing the internal checked state. This
618 * can be checked at a later point in the calling function, for instance
619 * with ftmn_return_res().
620 */
ftmn_checkpoint(struct ftmn * ftmn,enum ftmn_incr incr)621 static inline void ftmn_checkpoint(struct ftmn *ftmn, enum ftmn_incr incr)
622 {
623 if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
624 /*
625 * The purpose of the barriers is to prevent the compiler
626 * from optimizing this increase to some other location
627 * in the calling function.
628 */
629 barrier();
630 ftmn->check.steps += incr;
631 barrier();
632 }
633 }
634
635 /*
636 * ftmn_expect_state() - Check expected state
637 * @ftmn: The local struct ftmn
638 * @incr: Value to increase the checked state with
639 * @steps: Expected accumulated steps
640 * @res: Expected saved result or return value
641 *
642 * This is a more advanced version of ftmn_checkpoint() which before
643 * increasing the accumulated steps first checks the accumulated steps and
644 * saved result or return value.
645 */
ftmn_expect_state(struct ftmn * ftmn,enum ftmn_incr incr,unsigned long steps,unsigned long res)646 static inline void ftmn_expect_state(struct ftmn *ftmn,
647 enum ftmn_incr incr, unsigned long steps,
648 unsigned long res)
649 {
650 if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
651 assert((ftmn->check.res ^ FTMN_DEFAULT_HASH) == res);
652 assert(ftmn->check.steps == steps);
653
654 ___ftmn_expect_state(&ftmn->check, incr, steps, res);
655 }
656 }
657
658 /*
659 * ftmn_return_res() - Check and return result
660 * @ftmn: The local struct ftmn
661 * @steps: Expected accumulated steps
662 * @res: Expected saved result or return value
663 *
664 * Checks that the internal accumulated state matches the supplied @steps
665 * and that the saved result or return value matches the supplied one.
666 *
667 * Returns @res.
668 */
ftmn_return_res(struct ftmn * ftmn,unsigned long steps,unsigned long res)669 static inline unsigned long ftmn_return_res(struct ftmn *ftmn,
670 unsigned long steps,
671 unsigned long res)
672 {
673 /*
674 * We're expecting that the compiler does a tail call optimization
675 * allowing ___ftmn_return_res() to have full control over the
676 * returned value. Thus trying to reduce the window where the
677 * return value can be tampered with.
678 */
679 if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
680 assert((ftmn->check.res ^ FTMN_DEFAULT_HASH) == res);
681 assert(ftmn->check.steps == steps);
682
683 return ___ftmn_return_res(&ftmn->check, steps, res);
684 }
685 return res;
686 }
687 #endif /*__FAULT_MITIGATION_H*/
688