xref: /optee_os/lib/libutils/ext/asan.c (revision 081fba0cd741cce07a7ab56e592273cc9d26efa5)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
5  */
6 
7 #include <asan.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <printk.h>
11 #include <setjmp.h>
12 #include <string.h>
13 #include <trace.h>
14 #include <types_ext.h>
15 #include <util.h>
16 
17 #if __GCC_VERSION >= 70000
18 #define ASAN_ABI_VERSION 7
19 #else
20 #define ASAN_ABI_VERSION 6
21 #endif
22 
23 #if defined(__KERNEL__)
24 # include <keep.h>
25 # include <kernel/panic.h>
26 #elif defined(__LDELF__)
27 # include <ldelf_syscalls.h>
28 # include <ldelf.h>
29 #else
30 # include <tee_internal_api_extensions.h>
31 # include <utee_syscalls.h>
32 #endif
33 
34 #ifndef __KERNEL__
35 /* Stub for non-kernel builds */
36 #define DECLARE_KEEP_INIT(x)
37 #endif
38 
39 #ifndef SMALL_PAGE_SIZE
40 #define SMALL_PAGE_SIZE 4096
41 #endif
42 
43 #if TRACE_LEVEL >= TRACE_DEBUG
44 #define KASAN_DUMP_SHADOW
45 #endif
46 
/*
 * Source position of an instrumented global, as emitted by the
 * compiler (part of the ASan ABI; layout must not be changed).
 */
struct asan_source_location {
	const char *file_name;
	int line_no;
	int column_no;
};
52 
/*
 * Compiler-emitted descriptor for each instrumented global variable
 * (ASan ABI). The layout must match what the compiler emits for the
 * ABI version selected above; do not reorder fields.
 */
struct asan_global {
	uintptr_t beg;			/* start VA of the global */
	uintptr_t size;			/* size of the global itself */
	uintptr_t size_with_redzone;	/* size including trailing red zone */
	const char *name;
	const char *module_name;
	uintptr_t has_dynamic_init;
	struct asan_source_location *location;
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;
#endif
};
65 
#ifdef __KERNEL__
/* Kernel builds keep the shadow-region bookkeeping in a static object */
static struct asan_global_info __asan_global_info;
#endif

/* Checking is disabled until asan_start(); hooks early-exit before that */
static bool asan_active;
/* Invoked on a detected violation; replaceable via asan_set_panic_cb() */
static asan_panic_cb_t asan_panic_cb = asan_panic;
72 
/*
 * Default panic handler: abort execution through the environment's
 * native panic primitive (kernel panic, ldelf panic syscall or TA
 * panic syscall depending on the build).
 */
void __noreturn asan_panic(void)
{
#if defined(__KERNEL__)
	panic();
#elif defined(__LDELF__)
	_ldelf_panic(2);
#else
	_utee_panic(TEE_ERROR_GENERIC);
#endif
	/*
	 * _utee_panic is not marked as noreturn.
	 * See _utee_panic prototype in utee_syscalls.h for reasoning. To
	 * prevent "‘noreturn’ function does return" warning the while loop
	 * is used.
	 */
	while (1)
		;
}
91 
92 static bool addr_crosses_scale_boundary(vaddr_t addr, size_t size)
93 {
94 	return (addr >> ASAN_BLOCK_SHIFT) !=
95 	       ((addr + size - 1) >> ASAN_BLOCK_SHIFT);
96 }
97 
/*
 * Translate a virtual address to the address of its shadow byte:
 * shadow = va / ASAN_BLOCK_SIZE + shadow-offset. The offset constant
 * differs between kernel and user (ldelf/TA) builds.
 */
static int8_t *va_to_shadow(const void *va)
{
#if defined(__KERNEL__)
	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
		     CFG_ASAN_SHADOW_OFFSET;
#else
	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
		     CFG_USER_ASAN_SHADOW_OFFSET;
#endif
	return (int8_t *)sa;
}
109 
110 static size_t va_range_to_shadow_size(const void *begin, const void *end)
111 {
112 	return ((vaddr_t)end - (vaddr_t)begin) / ASAN_BLOCK_SIZE;
113 }
114 
115 static bool va_range_inside_shadow(const void *begin, const void *end)
116 {
117 	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
118 	vaddr_t b = (vaddr_t)begin;
119 	vaddr_t e = (vaddr_t)end;
120 	unsigned int i = 0;
121 
122 	if (b >= e)
123 		return false;
124 
125 	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
126 		if (b >= regs[i].lo && e <= regs[i].hi) {
127 			/* Access is covered fully by at least one region */
128 			return true;
129 		}
130 	}
131 
132 	return false;
133 }
134 
135 static bool va_range_outside_shadow(const void *begin, const void *end)
136 {
137 	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
138 	vaddr_t b = (vaddr_t)begin;
139 	vaddr_t e = (vaddr_t)end;
140 	unsigned int i = 0;
141 
142 	if (b >= e)
143 		return false;
144 
145 	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
146 		if (b < regs[i].hi && e > regs[i].lo) {
147 			/* Access covers region at least partly */
148 			return false;
149 		}
150 	}
151 
152 	return true;
153 }
154 
155 static size_t va_misalignment(const void *va)
156 {
157 	return (vaddr_t)va & ASAN_BLOCK_MASK;
158 }
159 
160 static bool va_is_well_aligned(const void *va)
161 {
162 	return !va_misalignment(va);
163 }
164 
165 void asan_add_shadowed(const void *begin, const void *end)
166 {
167 	struct asan_va_reg reg = {(vaddr_t)begin, (vaddr_t)end};
168 	struct asan_global_info *asan_info = GET_ASAN_INFO();
169 
170 	assert(va_is_well_aligned(begin));
171 	assert(va_is_well_aligned(end));
172 	assert(reg.lo < reg.hi);
173 	if (asan_info->regs_count < ASAN_VA_REGS_MAX) {
174 		asan_info->regs[asan_info->regs_count++] = reg;
175 	} else {
176 		EMSG("No free regions to allocate");
177 		asan_panic();
178 	}
179 }
180 
/*
 * Poison the block-aligned range [begin, end) as a red zone: any
 * instrumented access inside it will be reported.
 */
void asan_tag_no_access(const void *begin, const void *end)
{
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(va_range_inside_shadow(begin, end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_DATA_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}
190 
/*
 * Mark [begin, end) as accessible. @begin must be block-aligned; a
 * partially covered last block is encoded by storing the number of
 * valid leading bytes in its shadow byte.
 */
void asan_tag_access(const void *begin, const void *end)
{
	/* Nothing to do before any region is registered, or for an empty range */
	if (!GET_ASAN_INFO()->regs_count || begin == end)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));

	/* Shadow byte 0 means the whole 8-byte granule is addressable */
	asan_memset_unchecked(va_to_shadow(begin), 0,
			      va_range_to_shadow_size(begin, end));
	if (!va_is_well_aligned(end))
		*va_to_shadow(end) = va_misalignment(end);
}
204 
/*
 * Poison a freed heap range [begin, end) with the heap red-zone tag
 * so use-after-free accesses are reported. Both ends must be
 * block-aligned.
 */
void asan_tag_heap_free(const void *begin, const void *end)
{
	/* Nothing to do before any region is registered */
	if (!GET_ASAN_INFO()->regs_count)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_HEAP_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}
217 
218 __inhibit_loop_to_libcall void *asan_memset_unchecked(void *s, int c, size_t n)
219 {
220 	uint8_t *b = s;
221 	size_t m;
222 
223 	for (m = 0; m < n; m++)
224 		b[m] = c;
225 
226 	return s;
227 }
228 
229 __inhibit_loop_to_libcall
230 void *asan_memcpy_unchecked(void *__restrict dst, const void *__restrict src,
231 			    size_t len)
232 {
233 	uint8_t *__restrict d = dst;
234 	const uint8_t *__restrict s = src;
235 	size_t n;
236 
237 	for (n = 0; n < len; n++)
238 		d[n] = s[n];
239 
240 	return dst;
241 }
242 
/*
 * Enable access checking. At least one shadowed region must have been
 * registered first, and starting twice is a bug.
 */
void asan_start(void)
{
	assert(GET_ASAN_INFO()->regs_count > 0 && !asan_active);
	asan_active = true;
}

/* Override the action taken when a violation is detected. */
void asan_set_panic_cb(asan_panic_cb_t panic_cb)
{
	asan_panic_cb = panic_cb;
}
253 
254 static void asan_report(vaddr_t addr, size_t size)
255 {
256 #ifdef KASAN_DUMP_SHADOW
257 	char buf[128] = {0};
258 	int r = 0, rc = 0;
259 	vaddr_t b = 0, e = 0, saddr = 0;
260 
261 	b = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) - ASAN_BLOCK_SIZE * 2;
262 	e = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) + ASAN_BLOCK_SIZE * 2;
263 
264 	/* Print shadow map nearby */
265 	if (va_range_inside_shadow((void *)b, (void *)e)) {
266 		rc = snprintk(buf + r, sizeof(buf) - r, "%lx: ", b);
267 		assert(rc > 0);
268 		r += rc;
269 		for (saddr = b; saddr <= e; saddr += ASAN_BLOCK_SIZE) {
270 			int8_t *sbyte = va_to_shadow((void *)saddr);
271 
272 			rc = snprintk(buf + r, sizeof(buf) - r,
273 				      "0x%02x ", (uint8_t)*sbyte);
274 			assert(rc > 0);
275 			r += rc;
276 		}
277 		EMSG("%s", buf);
278 	}
279 #endif
280 	EMSG("[ASAN]: access violation, addr: %#"PRIxVA" size: %zu",
281 	      addr, size);
282 
283 	asan_panic_cb();
284 }
285 
286 static __always_inline bool asan_shadow_1byte_isvalid(vaddr_t addr)
287 {
288 	int8_t last = (addr & ASAN_BLOCK_MASK) + 1;
289 	int8_t *byte = va_to_shadow((void *)addr);
290 
291 	if (*byte == 0 || last <= *byte)
292 		return true;
293 
294 	return false;
295 }
296 
297 static __always_inline bool asan_shadow_2byte_isvalid(vaddr_t addr)
298 {
299 	if (addr_crosses_scale_boundary(addr, 2)) {
300 		return (asan_shadow_1byte_isvalid(addr) &&
301 			asan_shadow_1byte_isvalid(addr + 1));
302 	} else {
303 		int8_t last = ((addr + 1) & ASAN_BLOCK_MASK) + 1;
304 		int8_t *byte = va_to_shadow((void *)addr);
305 
306 		if (*byte == 0 || last <= *byte)
307 			return true;
308 
309 		return false;
310 	}
311 }
312 
313 static __always_inline bool asan_shadow_4byte_isvalid(vaddr_t addr)
314 {
315 	if (addr_crosses_scale_boundary(addr, 4)) {
316 		return (asan_shadow_2byte_isvalid(addr) &&
317 			asan_shadow_2byte_isvalid(addr + 2));
318 	} else {
319 		int8_t last = ((addr + 3) & ASAN_BLOCK_MASK) + 1;
320 		int8_t *byte = va_to_shadow((void *)addr);
321 
322 		if (*byte == 0 || last <= *byte)
323 			return true;
324 
325 		return false;
326 	}
327 }
328 
329 static __always_inline bool asan_shadow_8byte_isvalid(vaddr_t addr)
330 {
331 	if (addr_crosses_scale_boundary(addr, 8)) {
332 		return (asan_shadow_4byte_isvalid(addr) &&
333 			asan_shadow_4byte_isvalid(addr + 4));
334 	} else {
335 		int8_t last = ((addr + 7) & ASAN_BLOCK_MASK) + 1;
336 		int8_t *byte = va_to_shadow((void *)addr);
337 
338 		if (*byte == 0 || last <= *byte)
339 			return true;
340 
341 		return false;
342 	}
343 }
344 
345 static __always_inline bool asan_shadow_Nbyte_isvalid(vaddr_t addr,
346 						      size_t size)
347 {
348 	size_t i = 0;
349 
350 	for (; i < size; i++) {
351 		if (!asan_shadow_1byte_isvalid(addr + i))
352 			return false;
353 	}
354 
355 	return true;
356 }
357 
/*
 * Central check for all instrumented loads/stores. Does nothing until
 * asan_start() has been called. Accesses entirely outside the
 * shadowed regions are ignored; accesses straddling a region boundary
 * are fatal; otherwise the shadow bytes decide validity.
 */
static __always_inline void check_access(vaddr_t addr, size_t size)
{
	bool valid = false;
	void *begin = (void *)addr;
	void *end = (void *)(addr + size);

	if (!asan_active)
		return;
	if (size == 0)
		return;
	if (va_range_outside_shadow(begin, end))
		return;
	/*
	 * If it isn't outside it has to be completely inside or there's a
	 * problem.
	 */
	if (!va_range_inside_shadow(begin, end))
		asan_panic();

	/*
	 * For compile-time-constant sizes the switch folds away, leaving
	 * a single specialized shadow check inlined at the call site.
	 */
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = asan_shadow_1byte_isvalid(addr);
			break;
		case 2:
			valid = asan_shadow_2byte_isvalid(addr);
			break;
		case 4:
			valid = asan_shadow_4byte_isvalid(addr);
			break;
		case 8:
			valid = asan_shadow_8byte_isvalid(addr);
			break;
		default:
			valid = asan_shadow_Nbyte_isvalid(addr, size);
			break;
		}
	} else {
		valid = asan_shadow_Nbyte_isvalid(addr, size);
	}

	if (!valid)
		asan_report(addr, size);
}
402 
/* Instrumented load: verify the shadow allows reading [addr, addr+size). */
static __always_inline void check_load(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}

/* Instrumented store: same policy as loads. */
static __always_inline void check_store(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}

/* Out-of-line load report entry point; always fatal. */
static void __noreturn report_load(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}

/* Out-of-line store report entry point; always fatal. */
static void __noreturn report_store(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
422 
423 
424 
/*
 * Define the fixed-size ASan ABI entry points the compiler emits
 * calls to: __asan_{load,store}SIZE, their _noabort variants (treated
 * identically here since check_* panics by itself on error) and the
 * always-fatal __asan_report_* stubs. The extra declarations silence
 * missing-prototype warnings.
 */
#define DEFINE_ASAN_FUNC(type, size)				\
	void __asan_##type##size(vaddr_t addr);			\
	void __asan_##type##size(vaddr_t addr)			\
	{ check_##type(addr, size); }				\
	void __asan_##type##size##_noabort(vaddr_t addr);	\
	void __asan_##type##size##_noabort(vaddr_t addr)	\
	{ check_##type(addr, size); }				\
	void __asan_report_##type##size##_noabort(vaddr_t addr);\
	void __noreturn __asan_report_##type##size##_noabort(vaddr_t addr) \
	{ report_##type(addr, size); }

DEFINE_ASAN_FUNC(load, 1)
DEFINE_ASAN_FUNC(load, 2)
DEFINE_ASAN_FUNC(load, 4)
DEFINE_ASAN_FUNC(load, 8)
DEFINE_ASAN_FUNC(load, 16)
DEFINE_ASAN_FUNC(store, 1)
DEFINE_ASAN_FUNC(store, 2)
DEFINE_ASAN_FUNC(store, 4)
DEFINE_ASAN_FUNC(store, 8)
DEFINE_ASAN_FUNC(store, 16)
446 
void __asan_loadN_noabort(vaddr_t addr, size_t size);
/* Variable-sized load hook emitted by the compiler (ASan ABI). */
void __asan_loadN_noabort(vaddr_t addr, size_t size)
{
	check_load(addr, size);
}

void __asan_storeN_noabort(vaddr_t addr, size_t size);
/* Variable-sized store hook emitted by the compiler (ASan ABI). */
void __asan_storeN_noabort(vaddr_t addr, size_t size)
{
	check_store(addr, size);
}

void __asan_report_load_n_noabort(vaddr_t addr, size_t size);
/* Variable-sized load report hook; always fatal. */
void __noreturn __asan_report_load_n_noabort(vaddr_t addr, size_t size)
{
	report_load(addr, size);
}

void __asan_report_store_n_noabort(vaddr_t addr, size_t size);
/* Variable-sized store report hook; always fatal. */
void __noreturn __asan_report_store_n_noabort(vaddr_t addr, size_t size)
{
	report_store(addr, size);
}
470 
void __asan_handle_no_return(void);
/*
 * Called by the compiler before noreturn functions; a full ASan
 * runtime would unpoison the stack here. Intentionally a no-op.
 */
void __asan_handle_no_return(void)
{
}
475 
476 void __asan_register_globals(struct asan_global *globals, size_t size);
477 void __asan_register_globals(struct asan_global *globals, size_t size)
478 {
479 	size_t n = 0;
480 
481 	for (n = 0; n < size; n++) {
482 		vaddr_t begin = globals[n].beg;
483 		vaddr_t end = begin + globals[n].size;
484 		vaddr_t end_align = ROUNDUP(end, ASAN_BLOCK_SIZE);
485 		vaddr_t end_rz = begin + globals[n].size_with_redzone;
486 
487 		asan_tag_access((void *)begin, (void *)end);
488 		asan_tag_no_access((void *)end_align, (void *)end_rz);
489 	}
490 }
491 DECLARE_KEEP_INIT(__asan_register_globals);
492 
void __asan_unregister_globals(struct asan_global *globals, size_t size);
/* ABI counterpart of __asan_register_globals; nothing to undo here. */
void __asan_unregister_globals(struct asan_global *globals __unused,
			       size_t size __unused)
{
}
498 
/*
 * After a longjmp() the stack between the current stack pointer and
 * the jump target's saved stack pointer (@old_sp) is dead; re-tag it
 * accessible so red zones left behind by the skipped frames do not
 * trigger false reports.
 * NOTE(review): assumes a downward-growing stack and uses &top as an
 * approximation of the current stack pointer — confirm per arch.
 */
void asan_handle_longjmp(void *old_sp)
{
	void *top = old_sp;
	void *bottom = (void *)ROUNDDOWN((vaddr_t)&top,
					 ASAN_BLOCK_SIZE);

	asan_tag_access(bottom, top);
}
507 
508 #if !defined(__KERNEL__)
509 
/*
 * Map zero-initialized shadow memory at exactly [lo, hi) and record
 * it in the shadow-region table. Returns 0 on success, -1 when the
 * table is full or the mapping could not be placed at @lo.
 */
static int asan_map_shadow_region(vaddr_t lo, vaddr_t hi)
{
	struct asan_global_info *asan_info = GET_ASAN_INFO();
	TEE_Result rc = TEE_SUCCESS;
	size_t sz = hi - lo;
	vaddr_t req = lo;

	if (asan_info->s_regs_count >= ASAN_VA_REGS_MAX)
		return -1;

#if defined(__LDELF__)
	rc = _ldelf_map_zi(&req, sz, 0, 0, 0);
#else
	/*
	 * No status code on this path (rc stays TEE_SUCCESS): failure is
	 * presumably signalled by tee_map_zi_va() returning a value other
	 * than the requested address, caught by the req != lo check.
	 */
	req = (vaddr_t)tee_map_zi_va(req, sz, 0);
#endif
	if (rc != TEE_SUCCESS)
		return -1;
	if (req != lo)
		return -1;

	asan_info->s_regs[asan_info->s_regs_count++] =
		(struct asan_va_reg){ lo, hi };

	return 0;
}
535 
/*
 * Ensure shadow memory exists for the user VA range [lo, hi): compute
 * the page-rounded shadow range, trim away parts already mapped by
 * earlier calls, map the remaining gap, then record [lo, hi) as a
 * shadowed (checked) region. Returns 0 on success, -1 on an empty or
 * invalid request; mapping failures and unexpected overlaps panic.
 */
int asan_user_map_shadow(void *lo, void *hi)
{
	vaddr_t lo_s = 0;
	vaddr_t hi_s = 0;
	int rc = 0;

	if (lo == hi)
		return -1;

	lo_s = ROUNDDOWN((vaddr_t)va_to_shadow(lo), SMALL_PAGE_SIZE);
	hi_s = ROUNDUP((vaddr_t)va_to_shadow(hi), SMALL_PAGE_SIZE);

	if (lo_s >= hi_s)
		return -1;
	/* Refuse ranges that reach up into the ASan bookkeeping area */
	if (hi >= (void *)GET_ASAN_INFO())
		return -1;

	/*
	 * Walk the already mapped shadow ranges and trim [lo_s, hi_s)
	 * down to the part that is still missing.
	 */
	for (size_t i = 0; i < GET_ASAN_INFO()->s_regs_count; i++) {
		vaddr_t reg_lo_s = GET_ASAN_INFO()->s_regs[i].lo;
		vaddr_t reg_hi_s = GET_ASAN_INFO()->s_regs[i].hi;

		if (reg_hi_s <= lo_s || reg_lo_s >= hi_s) {
			/*
			 * This mapped range does not intersect the
			 * requested range. Skip it.
			 */
			continue;
		}
		if (reg_lo_s <= lo_s && reg_hi_s >= hi_s) {
			/*
			 * The requested shadow range is already fully
			 * mapped, so there is nothing left to do.
			 */
			goto out;
		}
		if (reg_lo_s <= lo_s && reg_hi_s < hi_s) {
			/*
			 * The mapped range covers the left side of
			 * the requested range.
			 */
			lo_s = reg_hi_s;
			continue;
		}
		if (reg_lo_s > lo_s && reg_hi_s >= hi_s) {
			/*
			 * The mapped range covers the right side of
			 * the requested range.
			 */
			hi_s = reg_lo_s;
			continue;
		}
		/*
		 * If we are here then there is a problem, that shouldn't
		 * happen for valid shadow mapping intervals.
		 */
		EMSG("can't handle: reg_lo_s %#"PRIxVA" reg_hi_s %#"
		     PRIxVA" lo_s %#"PRIxVA" hi_s %#"PRIxVA, reg_lo_s,
		     reg_hi_s, lo_s, hi_s);
		asan_panic();
	}
	/*
	 * If we reach this point, [lo_s, hi_s) is the remaining shadow
	 * gap that still needs to be mapped.
	 */
	assert(hi_s > lo_s);
	rc = asan_map_shadow_region(lo_s, hi_s);
	if (rc) {
		EMSG("Failed to map shadow region");
		asan_panic();
	}
out:
	/* Remember the original VA range as checked by ASan. */
	asan_add_shadowed(lo, hi);
	return 0;
}
615 
616 #else
617 
/* Kernel builds map the shadow statically; nothing to do here. */
int asan_user_map_shadow(void *lo __unused, void *hi __unused)
{
	return 0;
}
622 #endif
623