xref: /optee_os/lib/libutils/ext/include/fault_mitigation.h (revision 32b3180828fa15a49ccc86ecb4be9d274c140c89)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2022, Linaro Limited
4  */
5 #ifndef __FAULT_MITIGATION_H
6 #define __FAULT_MITIGATION_H
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <string.h>
11 #include <util.h>
12 
13 #ifdef __KERNEL__
14 #include <kernel/panic.h>
15 #include <kernel/thread.h>
16 #else
17 #include <tee_api.h>
18 #endif
19 
20 /*
 * Fault mitigation helpers to make successful Hardware Fault Attacks
22  * harder to achieve. The paper [1] by Riscure gives background to the
23  * problem.
24  *
25  * These helpers aim to make it hard for a single glitch attack to succeed
26  * while the protected function or one of the ftmn_*() functions are
27  * executed.
28  *
29  * To have something to work with we assume that a single glitch may affect
30  * a few instructions in sequence to do nothing or to corrupt the content
31  * of a few registers.
32  *
33  * Using the terminology from [1] we are implementing the following patterns:
34  * 3 FAULT.VALUE.CHECK
35  * 5 FAULT.DECISION.CHECK
36  * 9 FAULT.FLOW.CONTROL
37  *
 * Additionally, the following patterns are also acknowledged with a few
39  * comments:
40  * 1. FAULT.CONSTANT.CODING
41  *	Zero is normally a success code in OP-TEE so special functions are
42  *	added to record anything but a zero result.
43  * 8. FAULT.NESTED.CHECK
 *	The linked calls performed by for instance FTMN_CALL_FUNC() address
45  *	this by relying on the called function to update a state in
46  *	struct ftmn_func_arg which is checked when the function has returned.
47  * 11. FAULT.PENALTY
48  *	This is implicit since we're normally trying to protect things post
49  *	boot and booting takes quite some time.
50  *
51  * [1] https://web.archive.org/web/20220616035354/https://www.riscure.com/uploads/2020/05/Riscure_Whitepaper_Fault_Mitigation_Patterns_final.pdf
52  */
53 
54 #include <stdint.h>
55 #include <stdbool.h>
56 
57 /*
58  * struct ftmn_check - track current checked state
59  * @steps:	accumulated checkpoints
60  * @res:	last stored result or return value
61  *
62  * While a function is executed it can update its state as a way of keeping
63  * track of important passages inside the function. When the function
64  * returns with for instance ftmn_return_res() it is checked that the
65  * accumulated state matches the expected state.
66  *
67  * @res is xored with FTMN_DEFAULT_HASH in order to retrieve the saved
68  * result or return value.
69  */
struct ftmn_check {
	unsigned long steps;	/* accumulated checkpoint increments */
	unsigned long res;	/* result xored with FTMN_DEFAULT_HASH */
};
74 
75 /*
76  * struct ftmn_func_arg - track a called function
77  * @hash:	xor bitmask
78  * @res:	stored result xored with @hash
79  *
80  * When the call of a function is tracked @hash is initialized to hash of
81  * caller xored with hash of called function. Before the called function
82  * updates @res it first xors @hash with its own hash, which is supposed to
83  * restore @hash to the hash of the calling function. This allows the
84  * calling function to confirm that the correct function has been called.
85  */
struct ftmn_func_arg {
	unsigned long hash;	/* caller hash xored with callee hash */
	unsigned long res;	/* stored result xored with @hash */
};
90 
91 /*
92  * struct ftmn - link a tracked call chain
93  * @check:	local checked state
94  * @arg:	argument for the next called tracked function
95  * @saved_arg:	pointer to an optional argument passed to this function
96  * @arg_pp:	cached return value from __ftmn_get_tsd_func_arg_pp()
97  * @my_hash:	the hash of the calling function
98  * @called_hash:the hash of the called function
99  *
100  * In order to maintain the linked call chain of tracked functions the
101  * struct ftmn_func_arg passed to this function is saved in @saved_arg
102  * before updating the argument pointer with @arg.
103  */
struct ftmn {
	struct ftmn_check check;	 /* local checked state */
	struct ftmn_func_arg arg;	 /* argument for the next tracked call */
	struct ftmn_func_arg *saved_arg; /* previous argument, restored on pop */
	struct ftmn_func_arg **arg_pp;	 /* cached __ftmn_get_tsd_func_arg_pp() */
	unsigned long my_hash;		 /* hash of the calling function */
	unsigned long called_hash;	 /* hash of the called function */
};
112 
113 /*
114  * enum ftmn_incr - increase counter values
115  *
116  * Prime numbers to be used when increasing the accumulated state.
117  * Different increase counters can be used to keep apart different
118  * checkpoints.
119  */
enum ftmn_incr {
	/* Distinct prime numbers, see the comment above for the rationale. */
	FTMN_INCR0 = 7873,
	FTMN_INCR1 = 7877,
	FTMN_INCR2 = 7879,
	FTMN_INCR3 = 7883,
	FTMN_INCR4 = 7901,
	FTMN_INCR5 = 7907,
	/* NOTE(review): name suggests internal use only — not for callers */
	FTMN_INCR_RESERVED = 7919,
};
129 
/* memcmp()-style compare function used by the *_memcmp() helpers below */
typedef int (*ftmn_memcmp_t)(const void *p1, const void *p2, size_t nb);
131 
132 /* The default hash used when xoring the result in struct ftmn_check */
133 #ifdef __ILP32__
134 #define FTMN_DEFAULT_HASH	0x9c478bf6UL
135 #else
136 #define FTMN_DEFAULT_HASH	0xc478bf63e9500cb5UL
137 #endif
138 
139 /*
140  * FTMN_PANIC() - FTMN specific panic function
141  *
142  * This function is called whenever the FTMN function detects an
143  * inconsistency. An inconsistency is able to occur if the system is
144  * subject to a fault injection attack, in this case doing a panic() isn't
145  * an extreme measure.
146  */
/*
 * Wrapped in do { } while (0) so that FTMN_PANIC(); expands to exactly
 * one statement: a trailing semicolon baked into the macro would expand
 * to two statements and silently break unbraced if/else constructs.
 */
#ifdef __KERNEL__
#define FTMN_PANIC()	do { panic(); } while (0)
#else
#define FTMN_PANIC()	do { TEE_Panic(0); } while (0)
#endif
152 
/* Only the first 256 characters of a function name contribute to its hash. */
#define __FTMN_MAX_FUNC_NAME_LEN	256

/* Byte @o of the string @f, or 0 when @o is at or beyond the length @l. */
#define __FTMN_FUNC_BYTE(f, o, l)	((o) < (l) ? (uint8_t)(f)[(o)] : 0)

/* Load eight name bytes starting at offset @o as a little-endian u64. */
#define __FTMN_GET_FUNC_U64(f, o, l) \
	(SHIFT_U64(__FTMN_FUNC_BYTE((f), (o), (l)), 0) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 1, (l)), 8) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 2, (l)), 16) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 3, (l)), 24) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 4, (l)), 32) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 5, (l)), 40) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 6, (l)), 48) | \
	 SHIFT_U64(__FTMN_FUNC_BYTE((f), (o) + 7, (l)), 56))

/*
 * The __FTMN_FUNC_HASH<n>() helpers fold the (up to 256 byte) name down
 * to a single u64 by repeatedly xoring two halves of the remaining
 * range; <n> is the number of such macro instances at that level.
 */
#define __FTMN_FUNC_HASH32(f, o, l) \
	(__FTMN_GET_FUNC_U64((f), (o), (l)) ^ \
	 __FTMN_GET_FUNC_U64((f), (o) + 8, (l)))

#define __FTMN_FUNC_HASH16(f, o, l) \
	(__FTMN_FUNC_HASH32((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH32((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 16, (l)))

#define __FTMN_FUNC_HASH8(f, o, l) \
	(__FTMN_FUNC_HASH16((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH16((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 8, (l)))

#define __FTMN_FUNC_HASH4(f, o, l) \
	(__FTMN_FUNC_HASH8((f), (o), (l)) ^ \
	 __FTMN_FUNC_HASH8((f), (o) + __FTMN_MAX_FUNC_NAME_LEN / 4, (l)))

#define __FTMN_FUNC_HASH2(f, l) \
	(__FTMN_FUNC_HASH4(f, 0, l) ^ \
	 __FTMN_FUNC_HASH4(f, __FTMN_MAX_FUNC_NAME_LEN / 2, l))

/* With 32-bit longs the two halves of the u64 are folded together. */
#ifdef __ILP32__
#define __FTMN_FUNC_HASH(f, l) \
	(unsigned long)(__FTMN_FUNC_HASH2((f), (l)) ^ \
		        (__FTMN_FUNC_HASH2((f), (l)) >> 32))
#else
#define __FTMN_FUNC_HASH(f, l)	(unsigned long)__FTMN_FUNC_HASH2((f), (l))
#endif
194 
/*
 * __ftmn_step_count_<n>(): expected accumulated steps where c0 is the
 * number of times FTMN_INCR0 was added, c1 for FTMN_INCR1 and so on.
 */
#define __ftmn_step_count_1(c0) ((c0) * FTMN_INCR0)
#define __ftmn_step_count_2(c0, c1) \
	(__ftmn_step_count_1(c0) + (c1) * FTMN_INCR1)
#define __ftmn_step_count_3(c0, c1, c2) \
	(__ftmn_step_count_2(c0, c1) + (c2) * FTMN_INCR2)
#define __ftmn_step_count_4(c0, c1, c2, c3)	\
	(__ftmn_step_count_3(c0, c1, c2) + (c3) * FTMN_INCR3)
#define __ftmn_step_count_5(c0, c1, c2, c3, c4)	\
	(__ftmn_step_count_4(c0, c1, c2, c3) + (c4) * FTMN_INCR4)
#define __ftmn_step_count_6(c0, c1, c2, c3, c4, c5)	\
	(__ftmn_step_count_5(c0, c1, c2, c3, c4) + (c5) * FTMN_INCR5)
/* Classic preprocessor trick to count the number of supplied arguments. */
#define ___ftmn_args_count(_0, _1, _2, _3, _4, _5, x, ...) x
#define __ftmn_args_count(...) \
	___ftmn_args_count(__VA_ARGS__, 6, 5, 4, 3, 2, 1, 0)
/* Dispatch to __ftmn_step_count_<count>() after expanding @count. */
#define ___ftmn_step_count(count, ...)	__ftmn_step_count_ ## count(__VA_ARGS__)
#define __ftmn_step_count(count, ...)	___ftmn_step_count(count, __VA_ARGS__)
211 
/*
 * Out-of-line backends for the static inline wrappers below. The
 * wrappers only call these when CFG_FAULT_MITIGATION is enabled; see
 * each wrapper and the FTMN_*() macro documentation for the semantics.
 */

/* Check accumulated state, then return @res. */
unsigned long ___ftmn_return_res(struct ftmn_check *check, unsigned long steps,
				 unsigned long res);
/* Check accumulated state and stored result, then add @incr to @check. */
void ___ftmn_expect_state(struct ftmn_check *check, enum ftmn_incr incr,
			  unsigned long steps, unsigned long res);

/* Record the result of a callee in the linked call chain. */
void ___ftmn_callee_done(struct ftmn_func_arg *arg, unsigned long my_hash,
			 unsigned long res);
/* As ___ftmn_callee_done(), but guards against storing a zero result. */
void ___ftmn_callee_done_not_zero(struct ftmn_func_arg *arg,
				  unsigned long my_hash,
				  unsigned long res);
/* Record an already-computed memcmp() result, re-checked via @my_memcmp. */
void ___ftmn_callee_done_memcmp(struct ftmn_func_arg *arg,
				unsigned long my_hash, int res,
				ftmn_memcmp_t my_memcmp,
				const void *p1, const void *p2, size_t nb);
/* Record the callee result and check the local accumulated state. */
void ___ftmn_callee_done_check(struct ftmn_func_arg *arg, unsigned long my_hash,
			       struct ftmn_check *check, enum ftmn_incr incr,
			       unsigned long steps, unsigned long res);

/* Update an already recorded callee result with a non-zero value. */
void ___ftmn_callee_update_not_zero(struct ftmn_func_arg *arg,
				    unsigned long res);

/* Record @res in the local checked state, increasing it with @incr. */
void ___ftmn_set_check_res(struct ftmn_check *check, enum ftmn_incr incr,
			   unsigned long res);
/* As ___ftmn_set_check_res(), but guards against storing a zero result. */
void ___ftmn_set_check_res_not_zero(struct ftmn_check *check,
				    enum ftmn_incr incr,
				    unsigned long res);
/* Record a memcmp() result, re-checked via @my_memcmp. */
void ___ftmn_set_check_res_memcmp(struct ftmn_check *check, enum ftmn_incr incr,
				  int res, ftmn_memcmp_t my_memcmp,
				  const void *p1, const void *p2, size_t nb);

/* Copy the result of a linked call into the local checked state. */
void ___ftmn_copy_linked_call_res(struct ftmn_check *check, enum ftmn_incr incr,
				  struct ftmn_func_arg *arg, unsigned long res);
244 
245 
246 #ifndef __KERNEL__
247 extern struct ftmn_func_arg *__ftmn_global_func_arg;
248 #endif
249 
/*
 * Return a pointer to the location holding the current struct
 * ftmn_func_arg pointer, or NULL when fault mitigation is disabled.
 *
 * In the kernel the location lives in thread-specific data or in
 * core-local storage; in user mode a single global location is used.
 */
static inline struct ftmn_func_arg **__ftmn_get_tsd_func_arg_pp(void)
{
#if defined(CFG_FAULT_MITIGATION) && defined(__KERNEL__)
	/* Presumably a negative ID means no thread context — use core local */
	if (thread_get_id_may_fail() >= 0)
		return &thread_get_tsd()->ftmn_arg;
	else
		return &thread_get_core_local()->ftmn_arg;
#elif defined(CFG_FAULT_MITIGATION)
	return &__ftmn_global_func_arg;
#else
	return NULL;
#endif
}
263 
264 static inline struct ftmn_func_arg *__ftmn_get_tsd_func_arg(void)
265 {
266 	struct ftmn_func_arg **pp = __ftmn_get_tsd_func_arg_pp();
267 
268 	if (!pp)
269 		return NULL;
270 
271 	return *pp;
272 }
273 
274 static inline void __ftmn_push_linked_call(struct ftmn *ftmn,
275 					 unsigned long my_hash,
276 					 unsigned long called_hash)
277 {
278 	struct ftmn_func_arg **arg_pp = __ftmn_get_tsd_func_arg_pp();
279 
280 	if (arg_pp) {
281 		ftmn->arg_pp = arg_pp;
282 		ftmn->my_hash = my_hash;
283 		ftmn->called_hash = called_hash;
284 		ftmn->saved_arg = *ftmn->arg_pp;
285 		*ftmn->arg_pp = &ftmn->arg;
286 		ftmn->arg.hash = my_hash;
287 	}
288 }
289 
290 static inline void __ftmn_pop_linked_call(struct ftmn *ftmn)
291 {
292 	if (ftmn->arg_pp)
293 		*ftmn->arg_pp = ftmn->saved_arg;
294 }
295 
296 static inline void __ftmn_copy_linked_call_res(struct ftmn *f,
297 					       enum ftmn_incr incr,
298 					       unsigned long res)
299 {
300 	if (f->arg_pp) {
301 		assert(f->arg.hash == (f->my_hash ^ f->called_hash));
302 		assert(&f->arg == *f->arg_pp);
303 		assert((f->arg.hash ^ f->arg.res) == res);
304 		___ftmn_copy_linked_call_res(&f->check, incr, &f->arg, res);
305 	}
306 }
307 
/*
 * Replace the expected hash @my_old_hash with @my_new_hash in @arg by
 * xoring the old value out and the new value in.
 *
 * NOTE(review): "calle" looks like a typo for "callee"; renaming would
 * require updating FTMN_CALLEE_SWAP_HASH() and any external callers in
 * lockstep, so the name is kept as is here.
 */
static inline void __ftmn_calle_swap_hash(struct ftmn_func_arg *arg,
					  unsigned long my_old_hash,
					  unsigned long my_new_hash)
{
	if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
		arg->hash ^= my_old_hash ^ my_new_hash;
}
315 
316 static inline void __ftmn_callee_done(struct ftmn_func_arg *arg,
317 				      unsigned long my_hash, unsigned long res)
318 {
319 	if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
320 		___ftmn_callee_done(arg, my_hash, res);
321 }
322 
323 static inline void __ftmn_callee_done_not_zero(struct ftmn_func_arg *arg,
324 					       unsigned long hash,
325 					       unsigned long res)
326 {
327 	if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
328 		___ftmn_callee_done_not_zero(arg, hash, res);
329 }
330 
331 static inline int
332 __ftmn_callee_done_memcmp(struct ftmn_func_arg *arg, unsigned long hash,
333 			  ftmn_memcmp_t my_memcmp,
334 			  const void *p1, const void *p2, size_t nb)
335 {
336 	int res = my_memcmp(p1, p2, nb);
337 
338 	if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
339 		___ftmn_callee_done_memcmp(arg, hash, res, my_memcmp,
340 					   p1, p2, nb);
341 
342 	return res;
343 }
344 
345 static inline void __ftmn_callee_done_check(struct ftmn *ftmn,
346 					    unsigned long my_hash,
347 					    enum ftmn_incr incr,
348 					    unsigned long steps,
349 					    unsigned long res)
350 {
351 	if (IS_ENABLED(CFG_FAULT_MITIGATION))
352 		___ftmn_callee_done_check(__ftmn_get_tsd_func_arg(), my_hash,
353 					  &ftmn->check, incr, steps, res);
354 }
355 
356 static inline void __ftmn_callee_update_not_zero(struct ftmn_func_arg *arg,
357 						 unsigned long res)
358 {
359 	if (IS_ENABLED(CFG_FAULT_MITIGATION) && arg)
360 		___ftmn_callee_update_not_zero(arg, res);
361 }
362 
363 static inline void __ftmn_set_check_res(struct ftmn *ftmn, enum ftmn_incr incr,
364 				      unsigned long res)
365 {
366 	if (IS_ENABLED(CFG_FAULT_MITIGATION))
367 		___ftmn_set_check_res(&ftmn->check, incr, res);
368 }
369 
370 static inline void __ftmn_set_check_res_not_zero(struct ftmn *ftmn,
371 					       enum ftmn_incr incr,
372 					       unsigned long res)
373 {
374 	if (IS_ENABLED(CFG_FAULT_MITIGATION))
375 		___ftmn_set_check_res_not_zero(&ftmn->check, incr, res);
376 }
377 
378 
379 
380 /*
381  * FTMN_FUNC_HASH() - "hash" a function name
382  *
383  * Function names are "hashed" into an unsigned long. The "hashing" is done
384  * by xoring each 32/64 bit word of the function name producing a bit
385  * pattern that should be mostly unique for each function. Only the first
386  * 256 characters of the name are used when xoring as this is expected to
387  * be optimized to be calculated when compiling the source code in order to
388  * minimize the overhead.
389  */
390 #define FTMN_FUNC_HASH(name)	__FTMN_FUNC_HASH(name, sizeof(name))
391 
392 /*
393  * FTMN_PUSH_LINKED_CALL() - push call into a linked call chain
394  * @ftmn:		The local struct ftmn
395  * @called_func_hash:	The hash of the called function
396  *
397  * Inserts a call into a linked call chain or starts a new call chain if
398  * the passed struct ftmn_func_arg pointer was NULL.
399  *
400  * Each FTMN_PUSH_LINKED_CALL() is supposed to be matched by a
401  * FTMN_POP_LINKED_CALL().
402  */
403 #define FTMN_PUSH_LINKED_CALL(ftmn, called_func_hash) \
404 	__ftmn_push_linked_call((ftmn), FTMN_FUNC_HASH(__func__), \
405 				(called_func_hash))
406 
407 /*
408  * FTMN_SET_CHECK_RES_FROM_CALL() - copy the result from a linked call
409  * @ftmn:	The struct ftmn used during the linked call
410  * @incr:	Value to increase the checked state with
411  * @res:	Returned result to be match against the saved/copied result
412  *
413  * This macro is called just after a checked linked function has returned.
414  * The return value from the function is copied from the struct ftmn_func_arg
415  * passed to the called function into the local checked state. The checked
416  * state is increased with @incr. @res is checked against the saved result
417  * of the called function.
418  */
419 #define FTMN_SET_CHECK_RES_FROM_CALL(ftmn, incr, res) \
420 	__ftmn_copy_linked_call_res((ftmn), (incr), (res))
421 
422 /*
423  * FTMN_POP_LINKED_CALL() - remove a call from a linked call chain
424  * @ftmn:	The local struct ftmn
425  *
426  * Supposed to match a call to FTMN_PUSH_LINKED_CALL()
427  */
428 #define FTMN_POP_LINKED_CALL(ftmn) __ftmn_pop_linked_call((ftmn))
429 
430 /*
431  * FTMN_CALL_FUNC() - Do a linked call to a function
432  * @res:	Variable to be assigned the result of the called function
433  * @ftmn:	The local struct ftmn
434  * @incr:	Value to increase the checked state with
435  * @func:	Function to be called
436  * @...:	Arguments to pass to @func
437  *
438  * This macro can be used to make a linked call to another function, the
439  * callee. This macro depends on the callee to always update the struct
440  * ftmn_func_arg (part of struct ftmn) even when returning an error.
441  *
442  * Note that in the cases where the callee may skip updating the struct
443  * ftmn_func_arg this macro cannot be used as
444  * FTMN_SET_CHECK_RES_FROM_CALL() would cause a panic due to mismatching
445  * return value and saved result.
446  */
447 #define FTMN_CALL_FUNC(res, ftmn, incr, func, ...) \
448 	do { \
449 		FTMN_PUSH_LINKED_CALL((ftmn), FTMN_FUNC_HASH(#func)); \
450 		(res) = func(__VA_ARGS__); \
451 		FTMN_SET_CHECK_RES_FROM_CALL((ftmn), (incr), (res)); \
452 		FTMN_POP_LINKED_CALL((ftmn)); \
453 	} while (0)
454 
455 /*
456  * FTMN_CALLEE_DONE() - Record result of callee
457  * @res:	Result or return value
458  *
459  * The passed result will be stored in the struct ftmn_func_arg struct
460  * supplied by the caller. This function must only be called once by the
461  * callee.
462  *
463  * Note that this function is somewhat dangerous as any passed value will
464  * be stored so if the value has been tampered with there is no additional
465  * redundant checks to rely on.
466  */
467 #define FTMN_CALLEE_DONE(res) \
468 	__ftmn_callee_done(__ftmn_get_tsd_func_arg(), \
469 			   FTMN_FUNC_HASH(__func__), (res))
470 /*
471  * FTMN_CALLEE_DONE_NOT_ZERO() - Record non-zero result of callee
472  * @res:	Result or return value
473  *
474  * The passed result will be stored in the struct ftmn_func_arg struct
475  * supplied by the caller. This function must only be called once by the
476  * callee.
477  *
478  * Note that this function is somewhat dangerous as any passed value will
479  * be stored so if the value has been tampered with there is no additional
480  * redundant checks to rely on. However, there are extra checks against
481  * unintentionally storing a zero which often is interpreted as a
482  * successful return value.
483  */
484 #define FTMN_CALLEE_DONE_NOT_ZERO(res) \
485 	__ftmn_callee_done_not_zero(__ftmn_get_tsd_func_arg(), \
486 				    FTMN_FUNC_HASH(__func__), (res))
487 
488 /*
489  * FTMN_CALLEE_DONE_CHECK() - Record result of callee with checked state
490  * @ftmn:	The local struct ftmn
491  * @incr:	Value to increase the checked state with
492  * @exp_steps:	Expected recorded checkpoints
493  * @res:	Result or return value
494  *
495  * The passed result will be stored in the struct ftmn_func_arg struct
496  * supplied by the caller. This function must only be called once by the
497  * callee.
498  *
499  * @res is double checked against the value stored in local checked state.
 * @exp_steps is checked against the local checked state. The local
501  * checked state is increased by @incr.
502  */
503 #define FTMN_CALLEE_DONE_CHECK(ftmn, incr, exp_steps, res) \
504 	__ftmn_callee_done_check((ftmn), FTMN_FUNC_HASH(__func__), \
505 				 (incr), (exp_steps), (res))
506 
507 /*
508  * FTMN_CALLEE_DONE_MEMCMP() - Record result of memcmp() in a callee
509  * @my_memcmp:		Function pointer of custom memcmp()
510  * @p1:			Pointer to first buffer
511  * @p2:			Pointer to second buffer
512  * @nb:			Number of bytes
513  *
514  * The result from the mem compare is saved in the local checked state.
515  * This function must only be called once by the callee.
516  */
517 #define FTMN_CALLEE_DONE_MEMCMP(my_memcmp, p1, p2, nb) \
518 	__ftmn_callee_done_memcmp(__ftmn_get_tsd_func_arg(), \
519 				  FTMN_FUNC_HASH(__func__), (my_memcmp), \
520 				  (p1), (p2), (nb))
521 
522 /*
523  * FTMN_CALLEE_UPDATE_NOT_ZERO() - Update the result of a callee with a
524  *				   non-zero value
525  * @res:	Result or return value
526  *
527  * The passed result will be stored in the struct ftmn_func_arg struct
528  * supplied by the caller. This function can be called any number of times
529  * by the callee, provided that one of the FTMN_CALLEE_DONE_XXX() functions
530  * has been called first.
531  *
532  * Note that this function is somewhat dangerous as any passed value will
533  * be stored so if the value has been tampered with there is no additional
534  * redundant checks to rely on. However, there are extra checks against
535  * unintentionally storing a zero which often is interpreted as a
536  * successful return value.
537  */
538 #define FTMN_CALLEE_UPDATE_NOT_ZERO(res) \
539 	__ftmn_callee_update_not_zero(__ftmn_get_tsd_func_arg(), res)
540 
541 /*
542  * FTMN_CALLEE_SWAP_HASH() - Remove old hash and add new hash
543  * @my_old_hash:	The old hash to remove
544  *
545  * This macro replaces the old expected function hash with the hash of the
546  * current function.
547  *
548  * If a function is called using an alias the caller uses the hash of the
 * alias not the real function name. This hash is recorded in the field
550  * "hash" in struct ftmn_func_arg which can be found with
551  * __ftmn_get_tsd_func_arg().
552  *
553  * The FTMN_CALLE_* functions only work with the real function name so the
554  * old hash must be removed and replaced with the new for the calling
555  * function to be able to verify the result.
556  */
557 #define FTMN_CALLEE_SWAP_HASH(my_old_hash) \
558 	__ftmn_calle_swap_hash(__ftmn_get_tsd_func_arg(), \
559 			       (my_old_hash), FTMN_FUNC_HASH(__func__))
560 
561 /*
562  * FTMN_SET_CHECK_RES() - Records a result in local checked state
563  * @ftmn:	The local struct ftmn
564  * @incr:	Value to increase the checked state with
565  * @res:	Result or return value
566  *
567  * Note that this function is somewhat dangerous as any passed value will
568  * be stored so if the value has been tampered with there is no additional
569  * redundant checks to rely on.
570  */
571 #define FTMN_SET_CHECK_RES(ftmn, incr, res) \
572 	__ftmn_set_check_res((ftmn), (incr), (res))
573 
574 /*
575  * FTMN_SET_CHECK_RES_NOT_ZERO() - Records a non-zero result in local checked
576  *				   state
577  * @ftmn:	The local struct ftmn
578  * @incr:	Value to increase the checked state with
579  * @res:	Result or return value
580  *
581  * Note that this function is somewhat dangerous as any passed value will
582  * be stored so if the value has been tampered with there is no additional
583  * redundant checks to rely on. However, there are extra checks against
584  * unintentionally storing a zero which often is interpreted as a
585  * successful return value.
586  */
587 #define FTMN_SET_CHECK_RES_NOT_ZERO(ftmn, incr, res) \
588 	__ftmn_set_check_res_not_zero((ftmn), (incr), (res))
589 
590 static inline int ftmn_set_check_res_memcmp(struct ftmn *ftmn,
591 					    enum ftmn_incr incr,
592 					    ftmn_memcmp_t my_memcmp,
593 					    const void *p1, const void *p2,
594 					    size_t nb)
595 {
596 	int res = my_memcmp(p1, p2, nb);
597 
598 	if (IS_ENABLED(CFG_FAULT_MITIGATION))
599 		___ftmn_set_check_res_memcmp(&ftmn->check, incr, res,
600 					     my_memcmp, p1, p2, nb);
601 
602 	return res;
603 }
604 
605 /*
606  * FTMN_STEP_COUNT() - Calculate total step count
607  *
608  * Takes variable number of arguments, up to a total of 6. Where arg0
609  * is the number of times the counter has been increased by FTMN_INCR0,
610  * arg1 FTMN_INCR1 and so on.
611  */
612 #define FTMN_STEP_COUNT(...)	\
613 	__ftmn_step_count(__ftmn_args_count(__VA_ARGS__), __VA_ARGS__)
614 
615 /*
616  * ftmn_checkpoint() - Add a checkpoint
617  * @ftmn:	The local struct ftmn
618  * @incr:	Value to increase the checked state with
619  *
620  * Adds a checkpoint by increasing the internal checked state. This
621  * can be checked at a later point in the calling function, for instance
622  * with ftmn_return_res().
623  */
static inline void ftmn_checkpoint(struct ftmn *ftmn, enum ftmn_incr incr)
{
	if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
		/*
		 * The purpose of the barriers is to prevent the compiler
		 * from optimizing this increase to some other location
		 * in the calling function.
		 */
		barrier();
		/* The accumulated steps are verified by e.g. ftmn_return_res() */
		ftmn->check.steps += incr;
		barrier();
	}
}
637 
638 /*
639  * ftmn_expect_state() - Check expected state
640  * @ftmn:	The local struct ftmn
641  * @incr:	Value to increase the checked state with
642  * @steps:	Expected accumulated steps
643  * @res:	Expected saved result or return value
644  *
645  * This is a more advanced version of ftmn_checkpoint() which before
646  * increasing the accumulated steps first checks the accumulated steps and
647  * saved result or return value.
648  */
649 static inline void ftmn_expect_state(struct ftmn *ftmn,
650 				     enum ftmn_incr incr, unsigned long steps,
651 				     unsigned long res)
652 {
653 	if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
654 		assert((ftmn->check.res ^ FTMN_DEFAULT_HASH) == res);
655 		assert(ftmn->check.steps == steps);
656 
657 		___ftmn_expect_state(&ftmn->check, incr, steps, res);
658 	}
659 }
660 
661 /*
662  * ftmn_return_res() - Check and return result
663  * @ftmn:	The local struct ftmn
664  * @steps:	Expected accumulated steps
665  * @res:	Expected saved result or return value
666  *
667  * Checks that the internal accumulated state matches the supplied @steps
668  * and that the saved result or return value matches the supplied one.
669  *
670  * Returns @res.
671  */
static inline unsigned long ftmn_return_res(struct ftmn *ftmn,
					    unsigned long steps,
					    unsigned long res)
{
	/*
	 * We're expecting that the compiler does a tail call optimization
	 * allowing ___ftmn_return_res() to have full control over the
	 * returned value. Thus trying to reduce the window where the
	 * return value can be tampered with.
	 */
	if (IS_ENABLED(CFG_FAULT_MITIGATION)) {
		/* Debug-build sanity checks of the accumulated state. */
		assert((ftmn->check.res ^ FTMN_DEFAULT_HASH) == res);
		assert(ftmn->check.steps == steps);

		return ___ftmn_return_res(&ftmn->check, steps, res);
	}
	/* Mitigation disabled: just pass the result through. */
	return res;
}
690 #endif /*__FAULT_MITIGATION_H*/
691