xref: /optee_os/lib/libutils/ext/asan.c (revision 9f2dc7a17efdd58b8afa6c45edcc5bb99c766bfa)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
5  */
6 
7 #include <asan.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <printk.h>
11 #include <setjmp.h>
12 #include <string.h>
13 #include <trace.h>
14 #include <types_ext.h>
15 #include <util.h>
16 
17 #if __GCC_VERSION >= 70000
18 #define ASAN_ABI_VERSION 7
19 #else
20 #define ASAN_ABI_VERSION 6
21 #endif
22 
23 #if defined(__KERNEL__)
24 # include <keep.h>
25 # include <kernel/panic.h>
26 #elif defined(__LDELF__)
27 # include <ldelf_syscalls.h>
28 # include <ldelf.h>
29 #else
30 # include <tee_internal_api_extensions.h>
31 # include <utee_syscalls.h>
32 #endif
33 
34 #ifndef __KERNEL__
35 /* Stub for non-kernel builds */
36 #define DECLARE_KEEP_INIT(x)
37 #endif
38 
39 #ifndef SMALL_PAGE_SIZE
40 #define SMALL_PAGE_SIZE 4096
41 #endif
42 
/*
 * Source position of an instrumented global, as emitted by the
 * compiler's ASan instrumentation.
 */
struct asan_source_location {
	const char *file_name;
	int line_no;
	int column_no;
};
48 
/*
 * Descriptor for one instrumented global variable. The layout must
 * match what the compiler emits, hence the ASAN_ABI_VERSION guard on
 * the trailing member.
 */
struct asan_global {
	uintptr_t beg;			/* start VA of the global */
	uintptr_t size;			/* size of the global itself */
	uintptr_t size_with_redzone;	/* size including trailing redzone */
	const char *name;
	const char *module_name;
	uintptr_t has_dynamic_init;
	struct asan_source_location *location;
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;
#endif
};
61 
#ifdef __KERNEL__
/*
 * Kernel-side region bookkeeping; presumably reached through
 * GET_ASAN_INFO() — confirm against asan.h.
 */
static struct asan_global_info __asan_global_info;
#endif

/* Checking stays disabled until asan_start() is called */
static bool asan_active;
/* Called when a violation is detected; defaults to asan_panic() */
static asan_panic_cb_t asan_panic_cb = asan_panic;
68 
/*
 * Terminate execution after an unrecoverable ASan condition, using the
 * panic mechanism native to the build environment: kernel panic(),
 * the ldelf panic syscall or the TA panic syscall.
 */
void __noreturn asan_panic(void)
{
#if defined(__KERNEL__)
	panic();
#elif defined(__LDELF__)
	_ldelf_panic(2);
#else
	_utee_panic(TEE_ERROR_GENERIC);
#endif
	/*
	 * _utee_panic is not marked as noreturn.
	 * See _utee_panic prototype in utee_syscalls.h for reasoning. To
	 * prevent "‘noreturn’ function does return" warning the while loop
	 * is used.
	 */
	while (1)
		;
}
87 
88 static bool addr_crosses_scale_boundary(vaddr_t addr, size_t size)
89 {
90 	return (addr >> ASAN_BLOCK_SHIFT) !=
91 	       ((addr + size - 1) >> ASAN_BLOCK_SHIFT);
92 }
93 
/*
 * Translate a virtual address to the address of its shadow byte.
 * One shadow byte covers ASAN_BLOCK_SIZE bytes of memory; the shadow
 * map lives at a build-time offset that differs between the kernel
 * and user-space builds.
 */
static int8_t *va_to_shadow(const void *va)
{
#if defined(__KERNEL__)
	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
		     CFG_ASAN_SHADOW_OFFSET;
#else
	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
		     CFG_USER_ASAN_SHADOW_OFFSET;
#endif
	return (int8_t *)sa;
}
105 
106 static size_t va_range_to_shadow_size(const void *begin, const void *end)
107 {
108 	return ((vaddr_t)end - (vaddr_t)begin) / ASAN_BLOCK_SIZE;
109 }
110 
111 static bool va_range_inside_shadow(const void *begin, const void *end)
112 {
113 	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
114 	vaddr_t b = (vaddr_t)begin;
115 	vaddr_t e = (vaddr_t)end;
116 	unsigned int i = 0;
117 
118 	if (b >= e)
119 		return false;
120 
121 	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
122 		if (b >= regs[i].lo && e <= regs[i].hi) {
123 			/* Access is covered fully by at least one region */
124 			return true;
125 		}
126 	}
127 
128 	return false;
129 }
130 
131 static bool va_range_outside_shadow(const void *begin, const void *end)
132 {
133 	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
134 	vaddr_t b = (vaddr_t)begin;
135 	vaddr_t e = (vaddr_t)end;
136 	unsigned int i = 0;
137 
138 	if (b >= e)
139 		return false;
140 
141 	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
142 		if (b < regs[i].hi && e > regs[i].lo) {
143 			/* Access covers region at least partly */
144 			return false;
145 		}
146 	}
147 
148 	return true;
149 }
150 
151 static size_t va_misalignment(const void *va)
152 {
153 	return (vaddr_t)va & ASAN_BLOCK_MASK;
154 }
155 
156 static bool va_is_well_aligned(const void *va)
157 {
158 	return !va_misalignment(va);
159 }
160 
/*
 * Register [begin, end) as a VA region covered by the shadow map.
 * Both bounds must be shadow-granule aligned and the range non-empty.
 * Panics when the fixed-size region table is full.
 */
void asan_add_shadowed(const void *begin, const void *end)
{
	struct asan_va_reg reg = {(vaddr_t)begin, (vaddr_t)end};
	struct asan_global_info *asan_info = GET_ASAN_INFO();

	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(reg.lo < reg.hi);
	if (asan_info->regs_count < ASAN_VA_REGS_MAX) {
		asan_info->regs[asan_info->regs_count++] = reg;
	} else {
		EMSG("No free regions to allocate");
		asan_panic();
	}
}
176 
/*
 * Mark [begin, end) as inaccessible (red zone) in the shadow map.
 * Both bounds must be granule-aligned and inside a shadowed region.
 */
void asan_tag_no_access(const void *begin, const void *end)
{
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(va_range_inside_shadow(begin, end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_DATA_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}
186 
/*
 * Mark [begin, end) as accessible in the shadow map. @begin must be
 * granule-aligned; a non-aligned @end is handled by storing the number
 * of valid bytes into the last (partial) shadow byte. No-op before any
 * region is registered or for an empty range.
 */
void asan_tag_access(const void *begin, const void *end)
{
	if (!GET_ASAN_INFO()->regs_count || begin == end)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));

	asan_memset_unchecked(va_to_shadow(begin), 0,
			      va_range_to_shadow_size(begin, end));
	if (!va_is_well_aligned(end))
		*va_to_shadow(end) = va_misalignment(end);
}
200 
/*
 * Mark freed heap memory [begin, end) as a heap red zone so that
 * use-after-free accesses are caught. Both bounds must be
 * granule-aligned. No-op before any region is registered.
 */
void asan_tag_heap_free(const void *begin, const void *end)
{
	if (!GET_ASAN_INFO()->regs_count)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_HEAP_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}
213 
214 __inhibit_loop_to_libcall void *asan_memset_unchecked(void *s, int c, size_t n)
215 {
216 	uint8_t *b = s;
217 	size_t m;
218 
219 	for (m = 0; m < n; m++)
220 		b[m] = c;
221 
222 	return s;
223 }
224 
225 __inhibit_loop_to_libcall
226 void *asan_memcpy_unchecked(void *__restrict dst, const void *__restrict src,
227 			    size_t len)
228 {
229 	uint8_t *__restrict d = dst;
230 	const uint8_t *__restrict s = src;
231 	size_t n;
232 
233 	for (n = 0; n < len; n++)
234 		d[n] = s[n];
235 
236 	return dst;
237 }
238 
/*
 * Enable ASan checking. Must be called exactly once and only after at
 * least one shadowed region has been registered.
 */
void asan_start(void)
{
	assert(GET_ASAN_INFO()->regs_count > 0 && !asan_active);
	asan_active = true;
}
244 
/* Override the callback invoked when a violation is reported. */
void asan_set_panic_cb(asan_panic_cb_t panic_cb)
{
	asan_panic_cb = panic_cb;
}
249 
/*
 * Report an access violation at @addr of @size bytes and invoke the
 * panic callback. With KASAN_DUMP_SHADOW enabled, first print the
 * shadow bytes surrounding the faulting address as a debugging aid.
 */
static void asan_report(vaddr_t addr, size_t size)
{
#ifdef KASAN_DUMP_SHADOW
	char buf[128] = {0};
	int r = 0, rc = 0;
	vaddr_t b = 0, e = 0, saddr = 0;

	/* One granule before and after the faulting granule */
	b = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) - ASAN_BLOCK_SIZE;
	e = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) + ASAN_BLOCK_SIZE;

	/* Print shadow map nearby */
	if (va_range_inside_shadow((void *)b, (void *)e)) {
		rc = snprintk(buf + r, sizeof(buf) - r, "%lx: ", b);
		assert(rc > 0);
		r += rc;
		for (saddr = b; saddr <= e; saddr += ASAN_BLOCK_SIZE) {
			int8_t *sbyte = va_to_shadow((void *)saddr);

			rc = snprintk(buf + r, sizeof(buf) - r,
				      "0x%02x ", (uint8_t)*sbyte);
			assert(rc > 0);
			r += rc;
		}
		EMSG("%s", buf);
	}
#endif
	EMSG("[ASAN]: access violation, addr: %lx size: %zu\n",
	     addr, size);

	asan_panic_cb();
}
281 
/*
 * Check a 1-byte access at @addr against the shadow encoding: a shadow
 * byte of 0 means the whole granule is valid; a positive value K means
 * only the first K bytes of the granule are valid, so the access is
 * fine when its offset-within-granule + 1 <= K.
 */
static __always_inline bool asan_shadow_1byte_isvalid(vaddr_t addr)
{
	int8_t last = (addr & ASAN_BLOCK_MASK) + 1;
	int8_t *byte = va_to_shadow((void *)addr);

	if (*byte == 0 || last <= *byte)
		return true;

	return false;
}
292 
/*
 * Check a 2-byte access. When it straddles a granule boundary, fall
 * back to two 1-byte checks; otherwise test the offset of the last
 * byte against the single shadow byte.
 */
static __always_inline bool asan_shadow_2byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 2)) {
		return (asan_shadow_1byte_isvalid(addr) &&
			asan_shadow_1byte_isvalid(addr + 1));
	} else {
		int8_t last = ((addr + 1) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}
308 
/*
 * Check a 4-byte access. When it straddles a granule boundary, split
 * into two 2-byte checks; otherwise test the offset of the last byte
 * against the single shadow byte.
 */
static __always_inline bool asan_shadow_4byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 4)) {
		return (asan_shadow_2byte_isvalid(addr) &&
			asan_shadow_2byte_isvalid(addr + 2));
	} else {
		int8_t last = ((addr + 3) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}
324 
/*
 * Check an 8-byte access. When it straddles a granule boundary, split
 * into two 4-byte checks; otherwise test the offset of the last byte
 * against the single shadow byte.
 */
static __always_inline bool asan_shadow_8byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 8)) {
		return (asan_shadow_4byte_isvalid(addr) &&
			asan_shadow_4byte_isvalid(addr + 4));
	} else {
		int8_t last = ((addr + 7) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}
340 
341 static __always_inline bool asan_shadow_Nbyte_isvalid(vaddr_t addr,
342 						      size_t size)
343 {
344 	size_t i = 0;
345 
346 	for (; i < size; i++) {
347 		if (!asan_shadow_1byte_isvalid(addr + i))
348 			return false;
349 	}
350 
351 	return true;
352 }
353 
/*
 * Core validity check for an access of @size bytes at @addr. Accesses
 * fully outside the shadowed regions are ignored; accesses that are
 * neither fully outside nor fully inside indicate broken bookkeeping
 * and panic immediately. For the common power-of-two sizes the
 * specialized shadow checks are used when @size is a compile-time
 * constant. Invalid accesses go through asan_report().
 */
static __always_inline void check_access(vaddr_t addr, size_t size)
{
	bool valid = false;
	void *begin = (void *)addr;
	void *end = (void *)(addr + size);

	if (!asan_active)
		return;
	if (size == 0)
		return;
	if (va_range_outside_shadow(begin, end))
		return;
	/*
	 * If it isn't outside it has to be completely inside or there's a
	 * problem.
	 */
	if (!va_range_inside_shadow(begin, end))
		asan_panic();

	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = asan_shadow_1byte_isvalid(addr);
			break;
		case 2:
			valid = asan_shadow_2byte_isvalid(addr);
			break;
		case 4:
			valid = asan_shadow_4byte_isvalid(addr);
			break;
		case 8:
			valid = asan_shadow_8byte_isvalid(addr);
			break;
		default:
			valid = asan_shadow_Nbyte_isvalid(addr, size);
			break;
		}
	} else {
		valid = asan_shadow_Nbyte_isvalid(addr, size);
	}

	if (!valid)
		asan_report(addr, size);
}
398 
/* Validate a load; loads and stores are checked identically. */
static __always_inline void check_load(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}
403 
/* Validate a store; loads and stores are checked identically. */
static __always_inline void check_store(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}
408 
/* Compiler-invoked abort path for an invalid load. */
static void __noreturn report_load(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
413 
/* Compiler-invoked abort path for an invalid store. */
static void __noreturn report_store(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
418 
419 
420 
/*
 * Emit the __asan_{load,store}N entry points the compiler's
 * instrumentation calls, for a fixed access size: the checking
 * variant, its _noabort alias and the noreturn report hook.
 */
#define DEFINE_ASAN_FUNC(type, size)				\
	void __asan_##type##size(vaddr_t addr);			\
	void __asan_##type##size(vaddr_t addr)			\
	{ check_##type(addr, size); }				\
	void __asan_##type##size##_noabort(vaddr_t addr);	\
	void __asan_##type##size##_noabort(vaddr_t addr)	\
	{ check_##type(addr, size); }				\
	void __asan_report_##type##size##_noabort(vaddr_t addr);\
	void __noreturn __asan_report_##type##size##_noabort(vaddr_t addr) \
	{ report_##type(addr, size); }

DEFINE_ASAN_FUNC(load, 1)
DEFINE_ASAN_FUNC(load, 2)
DEFINE_ASAN_FUNC(load, 4)
DEFINE_ASAN_FUNC(load, 8)
DEFINE_ASAN_FUNC(load, 16)
DEFINE_ASAN_FUNC(store, 1)
DEFINE_ASAN_FUNC(store, 2)
DEFINE_ASAN_FUNC(store, 4)
DEFINE_ASAN_FUNC(store, 8)
DEFINE_ASAN_FUNC(store, 16)
442 
/* Compiler hook: check a load of a size not known at compile time. */
void __asan_loadN_noabort(vaddr_t addr, size_t size);
void __asan_loadN_noabort(vaddr_t addr, size_t size)
{
	check_load(addr, size);
}
448 
/* Compiler hook: check a store of a size not known at compile time. */
void __asan_storeN_noabort(vaddr_t addr, size_t size);
void __asan_storeN_noabort(vaddr_t addr, size_t size)
{
	check_store(addr, size);
}
454 
/* Compiler hook: report an invalid variable-size load; never returns. */
void __asan_report_load_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_load_n_noabort(vaddr_t addr, size_t size)
{
	report_load(addr, size);
}
460 
/* Compiler hook: report an invalid variable-size store; never returns. */
void __asan_report_store_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_store_n_noabort(vaddr_t addr, size_t size)
{
	report_store(addr, size);
}
466 
/*
 * Compiler hook called before noreturn functions. Intentionally a
 * no-op here: stale stack shadow is handled via asan_handle_longjmp()
 * instead.
 */
void __asan_handle_no_return(void);
void __asan_handle_no_return(void)
{
}
471 
/*
 * Compiler hook: for each instrumented global, mark the global itself
 * accessible and its trailing redzone (from the next granule boundary
 * up to size_with_redzone) inaccessible.
 */
void __asan_register_globals(struct asan_global *globals, size_t size);
void __asan_register_globals(struct asan_global *globals, size_t size)
{
	size_t n = 0;

	for (n = 0; n < size; n++) {
		vaddr_t begin = globals[n].beg;
		vaddr_t end = begin + globals[n].size;
		vaddr_t end_align = ROUNDUP(end, ASAN_BLOCK_SIZE);
		vaddr_t end_rz = begin + globals[n].size_with_redzone;

		asan_tag_access((void *)begin, (void *)end);
		asan_tag_no_access((void *)end_align, (void *)end_rz);
	}
}
DECLARE_KEEP_INIT(__asan_register_globals);
488 
/*
 * Compiler hook for global teardown. Nothing to do: globals live for
 * the whole image lifetime here.
 */
void __asan_unregister_globals(struct asan_global *globals, size_t size);
void __asan_unregister_globals(struct asan_global *globals __unused,
			       size_t size __unused)
{
}
494 
/*
 * After a longjmp() the stack between the current SP and the restored
 * SP (@old_sp) was abandoned without running the instrumented
 * epilogues, so re-mark that range as accessible to clear stale
 * redzone shadow. The current SP is approximated by the address of a
 * local, rounded down to a granule boundary.
 */
void asan_handle_longjmp(void *old_sp)
{
	void *top = old_sp;
	void *bottom = (void *)ROUNDDOWN((vaddr_t)&top,
					 ASAN_BLOCK_SIZE);

	asan_tag_access(bottom, top);
}
503 
504 #if !defined(__KERNEL__)
505 
/*
 * Map zero-initialized memory for the shadow range [lo, hi) at exactly
 * that address, via the ldelf or TA mapping service, and record it in
 * the mapped-shadow (s_regs) table. Returns 0 on success, -1 when the
 * table is full or the mapping could not be placed at @lo.
 */
static int asan_map_shadow_region(vaddr_t lo, vaddr_t hi)
{
	struct asan_global_info *asan_info = GET_ASAN_INFO();
	TEE_Result rc = TEE_SUCCESS;
	size_t sz = hi - lo;
	vaddr_t req = lo;

	if (asan_info->s_regs_count >= ASAN_VA_REGS_MAX)
		return -1;

#if defined(__LDELF__)
	rc = _ldelf_map_zi(&req, sz, 0, 0, 0);
#else
	/* rc stays TEE_SUCCESS here; failure shows as req != lo below */
	req = (vaddr_t)tee_map_zi_va(req, sz, 0);
#endif
	if (rc != TEE_SUCCESS)
		return -1;
	if (req != lo)
		return -1;

	asan_info->s_regs[asan_info->s_regs_count++] =
		(struct asan_va_reg){ lo, hi };

	return 0;
}
531 
/*
 * Ensure shadow memory exists for the VA range [lo, hi): compute the
 * page-aligned shadow range, subtract the parts already mapped, map
 * the remaining gap and finally register [lo, hi) as shadowed.
 * Returns 0 on success, -1 on an empty/invalid range; panics on
 * inconsistent shadow bookkeeping or mapping failure.
 *
 * NOTE(review): the trimming loop is a single pass, which assumes the
 * recorded shadow ranges never leave the request split into more than
 * one gap relative to ranges seen later in the walk — confirm that
 * s_regs entries are created in a compatible order.
 */
int asan_user_map_shadow(void *lo, void *hi)
{
	vaddr_t lo_s = 0;
	vaddr_t hi_s = 0;
	int rc = 0;

	if (lo == hi)
		return -1;

	lo_s = ROUNDDOWN((vaddr_t)va_to_shadow(lo), SMALL_PAGE_SIZE);
	hi_s = ROUNDUP((vaddr_t)va_to_shadow(hi), SMALL_PAGE_SIZE);

	if (lo_s >= hi_s)
		return -1;
	/* Refuse ranges reaching into the ASan bookkeeping itself */
	if (hi >= (void *)GET_ASAN_INFO())
		return -1;

	/*
	 * Walk the already mapped shadow ranges and trim [lo_s, hi_s)
	 * down to the part that is still missing.
	 */
	for (size_t i = 0; i < GET_ASAN_INFO()->s_regs_count; i++) {
		vaddr_t reg_lo_s = GET_ASAN_INFO()->s_regs[i].lo;
		vaddr_t reg_hi_s = GET_ASAN_INFO()->s_regs[i].hi;

		if (reg_hi_s <= lo_s || reg_lo_s >= hi_s) {
			/*
			 * This mapped range does not intersect the
			 * requested range. Skip it.
			 */
			continue;
		}
		if (reg_lo_s <= lo_s && reg_hi_s >= hi_s) {
			/*
			 * The requested shadow range is already fully
			 * mapped, so there is nothing left to do.
			 */
			goto out;
		}
		if (reg_lo_s <= lo_s && reg_hi_s < hi_s) {
			/*
			 * The mapped range covers the left side of
			 * the requested range.
			 */
			lo_s = reg_hi_s;
			continue;
		}
		if (reg_lo_s > lo_s && reg_hi_s >= hi_s) {
			/*
			 * The mapped range covers the right side of
			 * the requested range.
			 */
			hi_s = reg_lo_s;
			continue;
		}
		/*
		 * If we are here then there is a problem, that shouldn't
		 * happen for valid shadow mapping intervals.
		 */
		EMSG("can't handle: reg_lo_s %#"PRIxVA" reg_hi_s %#"
		     PRIxVA" lo_s %#"PRIxVA" hi_s %#"PRIxVA, reg_lo_s,
		     reg_hi_s, lo_s, hi_s);
		asan_panic();
	}
	/*
	 * If we reach this point, [lo_s, hi_s) is the remaining shadow
	 * gap that still needs to be mapped.
	 */
	assert(hi_s > lo_s);
	rc = asan_map_shadow_region(lo_s, hi_s);
	if (rc) {
		EMSG("Failed to map shadow region");
		asan_panic();
	}
out:
	/* Remember the original VA range as checked by ASan. */
	asan_add_shadowed(lo, hi);
	return 0;
}
611 
612 #else
613 
/* Kernel build: shadow is mapped statically, nothing to do here. */
int asan_user_map_shadow(void *lo __unused, void *hi __unused)
{
	return 0;
}
618 #endif
619