xref: /optee_os/lib/libutils/ext/asan.c (revision 4cafd8a3f88594ce06758c2a85b59c2d32a6ac7e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
5  */
6 
7 #include <asan.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <printk.h>
11 #include <setjmp.h>
12 #include <string.h>
13 #include <trace.h>
14 #include <types_ext.h>
15 #include <util.h>
16 
17 #if __GCC_VERSION >= 70000
18 #define ASAN_ABI_VERSION 7
19 #else
20 #define ASAN_ABI_VERSION 6
21 #endif
22 
23 #if defined(__KERNEL__)
24 # include <keep.h>
25 # include <kernel/panic.h>
26 #elif defined(__LDELF__)
27 # include <ldelf_syscalls.h>
28 # include <ldelf.h>
29 #else
30 # error "Not implemented"
31 #endif
32 
33 #ifndef __KERNEL__
34 /* Stub for non-kernel builds */
35 #define DECLARE_KEEP_INIT(x)
36 #endif
37 
38 #ifndef SMALL_PAGE_SIZE
39 #define SMALL_PAGE_SIZE 4096
40 #endif
41 
/*
 * Source location of an instrumented global, as emitted by the compiler
 * and referenced from struct asan_global.
 */
struct asan_source_location {
	const char *file_name;
	int line_no;
	int column_no;
};

/*
 * Descriptor for one instrumented global variable, passed by
 * compiler-generated constructors to __asan_register_globals().
 * The layout must match the compiler's ASan ABI; version 7 (GCC >= 7,
 * see ASAN_ABI_VERSION above) appends the ODR indicator field.
 */
struct asan_global {
	uintptr_t beg;			/* start address of the global */
	uintptr_t size;			/* size of the actual data */
	uintptr_t size_with_redzone;	/* data plus trailing red zone */
	const char *name;
	const char *module_name;
	uintptr_t has_dynamic_init;
	struct asan_source_location *location;
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;
#endif
};
60 
61 #ifdef __KERNEL__
62 static struct asan_global_info __asan_global_info;
63 #endif
64 
65 static bool asan_active;
66 static asan_panic_cb_t asan_panic_cb = asan_panic;
67 
/*
 * Default fatal-error handler: panic the kernel or the ldelf instance.
 * Used directly as the initial value of asan_panic_cb.
 */
void __noreturn asan_panic(void)
{
#if defined(__KERNEL__)
	panic();
#elif defined(__LDELF__)
	_ldelf_panic(2);
#else
#error "Not implemented"
#endif
	/*
	 * The panic call above is expected never to return, but its
	 * prototype may not be marked noreturn (see the syscall
	 * prototypes for reasoning). To prevent a "'noreturn' function
	 * does return" warning the while loop is used.
	 */
	while (1)
		;
}
86 
87 static bool addr_crosses_scale_boundary(vaddr_t addr, size_t size)
88 {
89 	return (addr >> ASAN_BLOCK_SHIFT) !=
90 	       ((addr + size - 1) >> ASAN_BLOCK_SHIFT);
91 }
92 
93 static int8_t *va_to_shadow(const void *va)
94 {
95 #if defined(__KERNEL__)
96 	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
97 		     CFG_ASAN_SHADOW_OFFSET;
98 #else
99 	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
100 		     CFG_USER_ASAN_SHADOW_OFFSET;
101 #endif
102 	return (int8_t *)sa;
103 }
104 
105 static size_t va_range_to_shadow_size(const void *begin, const void *end)
106 {
107 	return ((vaddr_t)end - (vaddr_t)begin) / ASAN_BLOCK_SIZE;
108 }
109 
110 static bool va_range_inside_shadow(const void *begin, const void *end)
111 {
112 	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
113 	vaddr_t b = (vaddr_t)begin;
114 	vaddr_t e = (vaddr_t)end;
115 	unsigned int i = 0;
116 
117 	if (b >= e)
118 		return false;
119 
120 	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
121 		if (b >= regs[i].lo && e <= regs[i].hi) {
122 			/* Access is covered fully by at least one region */
123 			return true;
124 		}
125 	}
126 
127 	return false;
128 }
129 
130 static bool va_range_outside_shadow(const void *begin, const void *end)
131 {
132 	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
133 	vaddr_t b = (vaddr_t)begin;
134 	vaddr_t e = (vaddr_t)end;
135 	unsigned int i = 0;
136 
137 	if (b >= e)
138 		return false;
139 
140 	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
141 		if (b < regs[i].hi && e > regs[i].lo) {
142 			/* Access covers region at least partly */
143 			return false;
144 		}
145 	}
146 
147 	return true;
148 }
149 
150 static size_t va_misalignment(const void *va)
151 {
152 	return (vaddr_t)va & ASAN_BLOCK_MASK;
153 }
154 
/* True when @va sits exactly on a shadow granule boundary */
static bool va_is_well_aligned(const void *va)
{
	return va_misalignment(va) == 0;
}
159 
160 void asan_add_shadowed(const void *begin, const void *end)
161 {
162 	struct asan_va_reg reg = {(vaddr_t)begin, (vaddr_t)end};
163 	struct asan_global_info *asan_info = GET_ASAN_INFO();
164 
165 	assert(va_is_well_aligned(begin));
166 	assert(va_is_well_aligned(end));
167 	assert(reg.lo < reg.hi);
168 	if (asan_info->regs_count < ASAN_VA_REGS_MAX) {
169 		asan_info->regs[asan_info->regs_count++] = reg;
170 	} else {
171 		EMSG("No free regions to allocate");
172 		asan_panic();
173 	}
174 }
175 
176 void asan_tag_no_access(const void *begin, const void *end)
177 {
178 	assert(va_is_well_aligned(begin));
179 	assert(va_is_well_aligned(end));
180 	assert(va_range_inside_shadow(begin, end));
181 
182 	asan_memset_unchecked(va_to_shadow(begin), ASAN_DATA_RED_ZONE,
183 			      va_range_to_shadow_size(begin, end));
184 }
185 
186 void asan_tag_access(const void *begin, const void *end)
187 {
188 	if (!GET_ASAN_INFO()->regs_count || begin == end)
189 		return;
190 
191 	assert(va_range_inside_shadow(begin, end));
192 	assert(va_is_well_aligned(begin));
193 
194 	asan_memset_unchecked(va_to_shadow(begin), 0,
195 			      va_range_to_shadow_size(begin, end));
196 	if (!va_is_well_aligned(end))
197 		*va_to_shadow(end) = va_misalignment(end);
198 }
199 
200 void asan_tag_heap_free(const void *begin, const void *end)
201 {
202 	if (!GET_ASAN_INFO()->regs_count)
203 		return;
204 
205 	assert(va_range_inside_shadow(begin, end));
206 	assert(va_is_well_aligned(begin));
207 	assert(va_is_well_aligned(end));
208 
209 	asan_memset_unchecked(va_to_shadow(begin), ASAN_HEAP_RED_ZONE,
210 			      va_range_to_shadow_size(begin, end));
211 }
212 
213 __inhibit_loop_to_libcall void *asan_memset_unchecked(void *s, int c, size_t n)
214 {
215 	uint8_t *b = s;
216 	size_t m;
217 
218 	for (m = 0; m < n; m++)
219 		b[m] = c;
220 
221 	return s;
222 }
223 
224 __inhibit_loop_to_libcall
225 void *asan_memcpy_unchecked(void *__restrict dst, const void *__restrict src,
226 			    size_t len)
227 {
228 	uint8_t *__restrict d = dst;
229 	const uint8_t *__restrict s = src;
230 	size_t n;
231 
232 	for (n = 0; n < len; n++)
233 		d[n] = s[n];
234 
235 	return dst;
236 }
237 
238 void asan_start(void)
239 {
240 	assert(GET_ASAN_INFO()->regs_count > 0 && !asan_active);
241 	asan_active = true;
242 }
243 
244 void asan_set_panic_cb(asan_panic_cb_t panic_cb)
245 {
246 	asan_panic_cb = panic_cb;
247 }
248 
/*
 * Report an invalid access of @size bytes at @addr and invoke the
 * registered panic callback. With KASAN_DUMP_SHADOW enabled, the
 * shadow bytes around the faulting address are logged first.
 */
static void asan_report(vaddr_t addr, size_t size)
{
#ifdef KASAN_DUMP_SHADOW
	char buf[128] = {0};
	int r = 0, rc = 0;
	vaddr_t b = 0, e = 0, saddr = 0;

	/* Dump from one granule before to one granule after @addr */
	b = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) - ASAN_BLOCK_SIZE;
	e = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) + ASAN_BLOCK_SIZE;

	/* Print shadow map nearby */
	if (va_range_inside_shadow((void *)b, (void *)e)) {
		rc = snprintk(buf + r, sizeof(buf) - r, "%lx: ", b);
		assert(rc > 0);
		r += rc;
		/* Inclusive upper bound: three shadow bytes are printed */
		for (saddr = b; saddr <= e; saddr += ASAN_BLOCK_SIZE) {
			int8_t *sbyte = va_to_shadow((void *)saddr);

			rc = snprintk(buf + r, sizeof(buf) - r,
				      "0x%02x ", (uint8_t)*sbyte);
			assert(rc > 0);
			r += rc;
		}
		EMSG("%s", buf);
	}
#endif
	EMSG("[ASAN]: access violation, addr: %lx size: %zu\n",
	     addr, size);

	asan_panic_cb();
}
280 
281 static __always_inline bool asan_shadow_1byte_isvalid(vaddr_t addr)
282 {
283 	int8_t last = (addr & ASAN_BLOCK_MASK) + 1;
284 	int8_t *byte = va_to_shadow((void *)addr);
285 
286 	if (*byte == 0 || last <= *byte)
287 		return true;
288 
289 	return false;
290 }
291 
292 static __always_inline bool asan_shadow_2byte_isvalid(vaddr_t addr)
293 {
294 	if (addr_crosses_scale_boundary(addr, 2)) {
295 		return (asan_shadow_1byte_isvalid(addr) &&
296 			asan_shadow_1byte_isvalid(addr + 1));
297 	} else {
298 		int8_t last = ((addr + 1) & ASAN_BLOCK_MASK) + 1;
299 		int8_t *byte = va_to_shadow((void *)addr);
300 
301 		if (*byte == 0 || last <= *byte)
302 			return true;
303 
304 		return false;
305 	}
306 }
307 
308 static __always_inline bool asan_shadow_4byte_isvalid(vaddr_t addr)
309 {
310 	if (addr_crosses_scale_boundary(addr, 4)) {
311 		return (asan_shadow_2byte_isvalid(addr) &&
312 			asan_shadow_2byte_isvalid(addr + 2));
313 	} else {
314 		int8_t last = ((addr + 3) & ASAN_BLOCK_MASK) + 1;
315 		int8_t *byte = va_to_shadow((void *)addr);
316 
317 		if (*byte == 0 || last <= *byte)
318 			return true;
319 
320 		return false;
321 	}
322 }
323 
324 static __always_inline bool asan_shadow_8byte_isvalid(vaddr_t addr)
325 {
326 	if (addr_crosses_scale_boundary(addr, 8)) {
327 		return (asan_shadow_4byte_isvalid(addr) &&
328 			asan_shadow_4byte_isvalid(addr + 4));
329 	} else {
330 		int8_t last = ((addr + 7) & ASAN_BLOCK_MASK) + 1;
331 		int8_t *byte = va_to_shadow((void *)addr);
332 
333 		if (*byte == 0 || last <= *byte)
334 			return true;
335 
336 		return false;
337 	}
338 }
339 
340 static __always_inline bool asan_shadow_Nbyte_isvalid(vaddr_t addr,
341 						      size_t size)
342 {
343 	size_t i = 0;
344 
345 	for (; i < size; i++) {
346 		if (!asan_shadow_1byte_isvalid(addr + i))
347 			return false;
348 	}
349 
350 	return true;
351 }
352 
/*
 * Core validator behind every __asan_* entry point. Reports (and by
 * default panics) on an invalid access, returns silently otherwise.
 */
static __always_inline void check_access(vaddr_t addr, size_t size)
{
	bool valid = false;
	void *begin = (void *)addr;
	void *end = (void *)(addr + size);

	/* Checking is disabled until asan_start() has run */
	if (!asan_active)
		return;
	if (size == 0)
		return;
	/* Accesses entirely outside the tracked regions are not checked */
	if (va_range_outside_shadow(begin, end))
		return;
	/*
	 * If it isn't outside it has to be completely inside or there's a
	 * problem.
	 */
	if (!va_range_inside_shadow(begin, end))
		asan_panic();

	/* Constant sizes resolve to the specialized checker at compile time */
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = asan_shadow_1byte_isvalid(addr);
			break;
		case 2:
			valid = asan_shadow_2byte_isvalid(addr);
			break;
		case 4:
			valid = asan_shadow_4byte_isvalid(addr);
			break;
		case 8:
			valid = asan_shadow_8byte_isvalid(addr);
			break;
		default:
			valid = asan_shadow_Nbyte_isvalid(addr, size);
			break;
		}
	} else {
		valid = asan_shadow_Nbyte_isvalid(addr, size);
	}

	if (!valid)
		asan_report(addr, size);
}
397 
398 static __always_inline void check_load(vaddr_t addr, size_t size)
399 {
400 	check_access(addr, size);
401 }
402 
403 static __always_inline void check_store(vaddr_t addr, size_t size)
404 {
405 	check_access(addr, size);
406 }
407 
/* Backend for the compiler's load-failure stubs; always fatal */
static void __noreturn report_load(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
412 
/* Backend for the compiler's store-failure stubs; always fatal */
static void __noreturn report_store(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
417 
418 
419 
/*
 * Instantiate the fixed-size entry points the compiler emits calls to:
 * __asan_{load,store}SIZE[_noabort]() check the access while
 * __asan_report_{load,store}SIZE_noabort() is called on a detected
 * failure and never returns.
 */
#define DEFINE_ASAN_FUNC(type, size)				\
	void __asan_##type##size(vaddr_t addr);			\
	void __asan_##type##size(vaddr_t addr)			\
	{ check_##type(addr, size); }				\
	void __asan_##type##size##_noabort(vaddr_t addr);	\
	void __asan_##type##size##_noabort(vaddr_t addr)	\
	{ check_##type(addr, size); }				\
	void __asan_report_##type##size##_noabort(vaddr_t addr);\
	void __noreturn __asan_report_##type##size##_noabort(vaddr_t addr) \
	{ report_##type(addr, size); }

DEFINE_ASAN_FUNC(load, 1)
DEFINE_ASAN_FUNC(load, 2)
DEFINE_ASAN_FUNC(load, 4)
DEFINE_ASAN_FUNC(load, 8)
DEFINE_ASAN_FUNC(load, 16)
DEFINE_ASAN_FUNC(store, 1)
DEFINE_ASAN_FUNC(store, 2)
DEFINE_ASAN_FUNC(store, 4)
DEFINE_ASAN_FUNC(store, 8)
DEFINE_ASAN_FUNC(store, 16)
441 
442 void __asan_loadN_noabort(vaddr_t addr, size_t size);
443 void __asan_loadN_noabort(vaddr_t addr, size_t size)
444 {
445 	check_load(addr, size);
446 }
447 
448 void __asan_storeN_noabort(vaddr_t addr, size_t size);
449 void __asan_storeN_noabort(vaddr_t addr, size_t size)
450 {
451 	check_store(addr, size);
452 }
453 
454 void __asan_report_load_n_noabort(vaddr_t addr, size_t size);
455 void __noreturn __asan_report_load_n_noabort(vaddr_t addr, size_t size)
456 {
457 	report_load(addr, size);
458 }
459 
460 void __asan_report_store_n_noabort(vaddr_t addr, size_t size);
461 void __noreturn __asan_report_store_n_noabort(vaddr_t addr, size_t size)
462 {
463 	report_store(addr, size);
464 }
465 
void __asan_handle_no_return(void);
/*
 * Called by the compiler before noreturn functions. Upstream ASan
 * would unpoison the remaining stack here; this implementation does
 * that via asan_handle_longjmp() instead, so nothing to do.
 */
void __asan_handle_no_return(void)
{
}
470 
471 void __asan_register_globals(struct asan_global *globals, size_t size);
472 void __asan_register_globals(struct asan_global *globals, size_t size)
473 {
474 	size_t n = 0;
475 
476 	for (n = 0; n < size; n++) {
477 		vaddr_t begin = globals[n].beg;
478 		vaddr_t end = begin + globals[n].size;
479 		vaddr_t end_align = ROUNDUP(end, ASAN_BLOCK_SIZE);
480 		vaddr_t end_rz = begin + globals[n].size_with_redzone;
481 
482 		asan_tag_access((void *)begin, (void *)end);
483 		asan_tag_no_access((void *)end_align, (void *)end_rz);
484 	}
485 }
486 DECLARE_KEEP_INIT(__asan_register_globals);
487 
void __asan_unregister_globals(struct asan_global *globals, size_t size);
/* Counterpart of __asan_register_globals(); nothing to undo here */
void __asan_unregister_globals(struct asan_global *globals __unused,
			       size_t size __unused)
{
}
493 
/*
 * Re-tag as accessible the stack area skipped over by a longjmp(),
 * i.e. everything between the current stack frame and @old_sp.
 * NOTE(review): uses &top as an approximation of the current stack
 * pointer and assumes a downward-growing stack — confirm for new
 * architectures.
 */
void asan_handle_longjmp(void *old_sp)
{
	void *top = old_sp;
	void *bottom = (void *)ROUNDDOWN((vaddr_t)&top,
					 ASAN_BLOCK_SIZE);

	asan_tag_access(bottom, top);
}
502 
503 #if !defined(__KERNEL__)
504 
/*
 * Map zero-initialized pages backing the shadow range [lo, hi) and
 * record it in s_regs[]. @lo/@hi are page-aligned by the caller.
 * Returns 0 on success, -1 on failure.
 */
static int asan_map_shadow_region(vaddr_t lo, vaddr_t hi)
{
	struct asan_global_info *asan_info = GET_ASAN_INFO();
	TEE_Result rc = TEE_SUCCESS;
	size_t sz = hi - lo;
	vaddr_t req = lo;

	if (asan_info->s_regs_count >= ASAN_VA_REGS_MAX)
		return -1;

#if defined(__LDELF__)
	/* Request a zero-initialized mapping exactly at @req */
	rc = _ldelf_map_zi(&req, sz, 0, 0, 0);
#else
#error "Not implemented"
#endif
	if (rc != TEE_SUCCESS)
		return -1;
	/*
	 * The shadow must live at its fixed address. NOTE(review): when
	 * the mapping lands elsewhere it is not unmapped in this error
	 * path — verify whether that leak is acceptable.
	 */
	if (req != lo)
		return -1;

	asan_info->s_regs[asan_info->s_regs_count++] =
		(struct asan_va_reg){ lo, hi };

	return 0;
}
530 
/*
 * Make the VA range [lo, hi) checkable by user-space ASan: map the
 * still-missing part of its shadow and register the range as
 * shadowed. Returns 0 on success (or when the shadow was already
 * mapped), -1 on invalid input or mapping failure at the edges.
 */
int asan_user_map_shadow(void *lo, void *hi)
{
	vaddr_t lo_s = 0;
	vaddr_t hi_s = 0;
	int rc = 0;

	if (lo == hi)
		return -1;

	/* Shadow range expanded to whole pages for mapping */
	lo_s = ROUNDDOWN((vaddr_t)va_to_shadow(lo), SMALL_PAGE_SIZE);
	hi_s = ROUNDUP((vaddr_t)va_to_shadow(hi), SMALL_PAGE_SIZE);

	if (lo_s >= hi_s)
		return -1;
	/* Refuse ranges that would cover the bookkeeping structure itself */
	if (hi >= (void *)GET_ASAN_INFO())
		return -1;

	/*
	 * Walk the already mapped shadow ranges and trim [lo_s, hi_s)
	 * down to the part that is still missing.
	 */
	for (size_t i = 0; i < GET_ASAN_INFO()->s_regs_count; i++) {
		vaddr_t reg_lo_s = GET_ASAN_INFO()->s_regs[i].lo;
		vaddr_t reg_hi_s = GET_ASAN_INFO()->s_regs[i].hi;

		if (reg_hi_s <= lo_s || reg_lo_s >= hi_s) {
			/*
			 * This mapped range does not intersect the
			 * requested range. Skip it.
			 */
			continue;
		}
		if (reg_lo_s <= lo_s && reg_hi_s >= hi_s) {
			/*
			 * The requested shadow range is already fully
			 * mapped, so there is nothing left to do.
			 */
			goto out;
		}
		if (reg_lo_s <= lo_s && reg_hi_s < hi_s) {
			/*
			 * The mapped range covers the left side of
			 * the requested range.
			 */
			lo_s = reg_hi_s;
			continue;
		}
		if (reg_lo_s > lo_s && reg_hi_s >= hi_s) {
			/*
			 * The mapped range covers the right side of
			 * the requested range.
			 */
			hi_s = reg_lo_s;
			continue;
		}
		/*
		 * If we are here then there is a problem, that shouldn't
		 * happen for valid shadow mapping intervals.
		 * (A mapped range strictly inside the request would
		 * split it in two, which this loop cannot represent.)
		 */
		EMSG("can't handle: reg_lo_s %#"PRIxVA" reg_hi_s %#"
		     PRIxVA" lo_s %#"PRIxVA" hi_s %#"PRIxVA, reg_lo_s,
		     reg_hi_s, lo_s, hi_s);
		asan_panic();
	}
	/*
	 * If we reach this point, [lo_s, hi_s) is the remaining shadow
	 * gap that still needs to be mapped.
	 */
	assert(hi_s > lo_s);
	rc = asan_map_shadow_region(lo_s, hi_s);
	if (rc) {
		EMSG("Failed to map shadow region");
		asan_panic();
	}
out:
	/* Remember the original VA range as checked by ASan. */
	asan_add_shadowed(lo, hi);
	return 0;
}
610 
611 #else
612 
/*
 * Kernel builds set up the shadow mapping elsewhere, so this
 * user-space entry point is a no-op there.
 */
int asan_user_map_shadow(void *lo __unused, void *hi __unused)
{
	return 0;
}
617 #endif
618