1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2016, Linaro Limited
4 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
5 */
6
7 #include <asan.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <printk.h>
11 #include <setjmp.h>
12 #include <string.h>
13 #include <trace.h>
14 #include <types_ext.h>
15 #include <util.h>
16
17 #if __GCC_VERSION >= 70000
18 #define ASAN_ABI_VERSION 7
19 #else
20 #define ASAN_ABI_VERSION 6
21 #endif
22
23 #if defined(__KERNEL__)
24 # include <keep.h>
25 # include <kernel/panic.h>
26 #elif defined(__LDELF__)
27 # include <ldelf_syscalls.h>
28 # include <ldelf.h>
29 #else
30 # include <tee_internal_api_extensions.h>
31 # include <utee_syscalls.h>
32 #endif
33
34 #ifndef __KERNEL__
35 /* Stub for non-kernel builds */
36 #define DECLARE_KEEP_INIT(x)
37 #endif
38
39 #ifndef SMALL_PAGE_SIZE
40 #define SMALL_PAGE_SIZE 4096
41 #endif
42
43 #if TRACE_LEVEL >= TRACE_DEBUG
44 #define KASAN_DUMP_SHADOW
45 #endif
46
/*
 * Source position of an instrumented global variable. Filled in by the
 * compiler as part of the ASan ABI (referenced from struct asan_global).
 */
struct asan_source_location {
	const char *file_name;	/* File the global was declared in */
	int line_no;		/* Line of the declaration */
	int column_no;		/* Column of the declaration */
};
52
/*
 * Compiler-emitted descriptor for one instrumented global variable,
 * passed to __asan_register_globals(). The layout is dictated by the
 * compiler's ASan ABI (see ASAN_ABI_VERSION above) and must not change.
 */
struct asan_global {
	uintptr_t beg;			/* Start address of the global */
	uintptr_t size;			/* Size of the payload in bytes */
	uintptr_t size_with_redzone;	/* Payload plus trailing redzone */
	const char *name;		/* Name of the variable */
	const char *module_name;	/* Name of the defining module */
	uintptr_t has_dynamic_init;	/* Non-zero for dynamic initializers */
	struct asan_source_location *location;	/* Declaration position */
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;	/* Present from ABI version 7 on */
#endif
};
65
#ifdef __KERNEL__
/*
 * Kernel-side storage for shadow-region bookkeeping; presumably reached
 * through the GET_ASAN_INFO() macro from asan.h — confirm against header.
 */
static struct asan_global_info __asan_global_info;
#endif

/* Gates check_access(); flipped to true once by asan_start() */
static bool asan_active;
/* Violation handler, overridable via asan_set_panic_cb() */
static asan_panic_cb_t asan_panic_cb = asan_panic;
72
/*
 * Default fatal-error handler: terminate execution through the
 * environment-appropriate panic mechanism (kernel panic, ldelf panic
 * syscall, or TA panic syscall). Never returns.
 */
void __noreturn asan_panic(void)
{
#if defined(__KERNEL__)
	panic();
#elif defined(__LDELF__)
	_ldelf_panic(2);
#else
	_utee_panic(TEE_ERROR_GENERIC);
#endif
	/*
	 * _utee_panic is not marked as noreturn.
	 * See _utee_panic prototype in utee_syscalls.h for reasoning. To
	 * prevent "'noreturn' function does return" warning the while loop
	 * is used.
	 */
	while (1)
		;
}
91
addr_crosses_scale_boundary(vaddr_t addr,size_t size)92 static bool addr_crosses_scale_boundary(vaddr_t addr, size_t size)
93 {
94 return (addr >> ASAN_BLOCK_SHIFT) !=
95 ((addr + size - 1) >> ASAN_BLOCK_SHIFT);
96 }
97
va_to_shadow(const void * va)98 static int8_t *va_to_shadow(const void *va)
99 {
100 #if defined(__KERNEL__)
101 vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
102 CFG_ASAN_SHADOW_OFFSET;
103 #else
104 vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) +
105 CFG_USER_ASAN_SHADOW_OFFSET;
106 #endif
107 return (int8_t *)sa;
108 }
109
va_range_to_shadow_size(const void * begin,const void * end)110 static size_t va_range_to_shadow_size(const void *begin, const void *end)
111 {
112 return ((vaddr_t)end - (vaddr_t)begin) / ASAN_BLOCK_SIZE;
113 }
114
va_range_inside_shadow(const void * begin,const void * end)115 static bool va_range_inside_shadow(const void *begin, const void *end)
116 {
117 struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
118 vaddr_t b = (vaddr_t)begin;
119 vaddr_t e = (vaddr_t)end;
120 unsigned int i = 0;
121
122 if (b >= e)
123 return false;
124
125 for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
126 if (b >= regs[i].lo && e <= regs[i].hi) {
127 /* Access is covered fully by at least one region */
128 return true;
129 }
130 }
131
132 return false;
133 }
134
va_range_outside_shadow(const void * begin,const void * end)135 static bool va_range_outside_shadow(const void *begin, const void *end)
136 {
137 struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
138 vaddr_t b = (vaddr_t)begin;
139 vaddr_t e = (vaddr_t)end;
140 unsigned int i = 0;
141
142 if (b >= e)
143 return false;
144
145 for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
146 if (b < regs[i].hi && e > regs[i].lo) {
147 /* Access covers region at least partly */
148 return false;
149 }
150 }
151
152 return true;
153 }
154
va_misalignment(const void * va)155 static size_t va_misalignment(const void *va)
156 {
157 return (vaddr_t)va & ASAN_BLOCK_MASK;
158 }
159
va_is_well_aligned(const void * va)160 static bool va_is_well_aligned(const void *va)
161 {
162 return !va_misalignment(va);
163 }
164
asan_add_shadowed(const void * begin,const void * end,enum asan_va_reg_type type)165 void asan_add_shadowed(const void *begin, const void *end,
166 enum asan_va_reg_type type)
167 {
168 struct asan_global_info *asan_info = GET_ASAN_INFO();
169 struct asan_va_reg reg = {
170 .lo = (vaddr_t)begin,
171 .hi = (vaddr_t)end,
172 };
173 size_t idx = 0;
174
175 assert(va_is_well_aligned(begin));
176 assert(va_is_well_aligned(end));
177 assert(reg.lo < reg.hi);
178 if (asan_info->regs_count < ASAN_VA_REGS_MAX) {
179 idx = asan_info->regs_count++;
180 asan_info->regs[idx] = reg;
181 asan_info->type[idx] = type;
182 } else {
183 EMSG("No free regions to allocate");
184 asan_panic();
185 }
186 }
187
asan_tag_no_access(const void * begin,const void * end)188 void asan_tag_no_access(const void *begin, const void *end)
189 {
190 assert(va_is_well_aligned(begin));
191 assert(va_is_well_aligned(end));
192 assert(va_range_inside_shadow(begin, end));
193
194 asan_memset_unchecked(va_to_shadow(begin), ASAN_DATA_RED_ZONE,
195 va_range_to_shadow_size(begin, end));
196 }
197
asan_tag_access(const void * begin,const void * end)198 void asan_tag_access(const void *begin, const void *end)
199 {
200 if (!GET_ASAN_INFO()->regs_count || begin == end)
201 return;
202
203 assert(va_range_inside_shadow(begin, end));
204 assert(va_is_well_aligned(begin));
205
206 asan_memset_unchecked(va_to_shadow(begin), 0,
207 va_range_to_shadow_size(begin, end));
208 if (!va_is_well_aligned(end))
209 *va_to_shadow(end) = va_misalignment(end);
210 }
211
mpool_exists_in_range(vaddr_t begin,vaddr_t end)212 static bool mpool_exists_in_range(vaddr_t begin, vaddr_t end)
213 {
214 struct asan_global_info *asan_info = GET_ASAN_INFO();
215 struct asan_va_reg *regs = asan_info->regs;
216 unsigned int i = 0;
217
218 for (i = 0; i < asan_info->regs_count; i++) {
219 if (asan_info->type[i] == ASAN_REG_MEM_POOL &&
220 regs[i].hi <= end && regs[i].lo >= begin)
221 return true;
222 }
223
224 return false;
225 }
226
asan_tag_heap_free(const void * begin,const void * end)227 void asan_tag_heap_free(const void *begin, const void *end)
228 {
229 if (!GET_ASAN_INFO()->regs_count)
230 return;
231
232 assert(va_range_inside_shadow(begin, end));
233 assert(va_is_well_aligned(begin));
234 assert(va_is_well_aligned(end));
235
236 asan_memset_unchecked(va_to_shadow(begin), ASAN_HEAP_RED_ZONE,
237 va_range_to_shadow_size(begin, end));
238 }
239
asan_memset_unchecked(void * s,int c,size_t n)240 __inhibit_loop_to_libcall void *asan_memset_unchecked(void *s, int c, size_t n)
241 {
242 uint8_t *b = s;
243 size_t m;
244
245 for (m = 0; m < n; m++)
246 b[m] = c;
247
248 return s;
249 }
250
251 __inhibit_loop_to_libcall
asan_memcpy_unchecked(void * __restrict dst,const void * __restrict src,size_t len)252 void *asan_memcpy_unchecked(void *__restrict dst, const void *__restrict src,
253 size_t len)
254 {
255 uint8_t *__restrict d = dst;
256 const uint8_t *__restrict s = src;
257 size_t n;
258
259 for (n = 0; n < len; n++)
260 d[n] = s[n];
261
262 return dst;
263 }
264
asan_start(void)265 void asan_start(void)
266 {
267 assert(GET_ASAN_INFO()->regs_count > 0 && !asan_active);
268 asan_active = true;
269 }
270
/* Replace the violation handler (defaults to asan_panic()) */
void asan_set_panic_cb(asan_panic_cb_t panic_cb)
{
	asan_panic_cb = panic_cb;
}
275
/*
 * Report an invalid access of @size bytes at @addr: optionally dump the
 * nearby shadow bytes (debug builds), log the violation and invoke the
 * registered panic callback. Does not return in practice since the
 * default callback panics.
 */
static void asan_report(vaddr_t addr, size_t size)
{
#ifdef KASAN_DUMP_SHADOW
	char buf[128] = { 0 };
	int r = 0;
	int rc = 0;
	vaddr_t b = 0;
	vaddr_t e = 0;
	vaddr_t saddr = 0;

	/* Two granules of context on each side of the faulting address */
	b = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) - ASAN_BLOCK_SIZE * 2;
	e = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) + ASAN_BLOCK_SIZE * 2;

	/* Print shadow map nearby */
	if (va_range_inside_shadow((void *)b, (void *)e)) {
		/*
		 * Use PRIxVA for vaddr_t: "%lx" is wrong on targets where
		 * vaddr_t is not long (matches the EMSG() format below).
		 */
		rc = snprintk(buf + r, sizeof(buf) - r, "%" PRIxVA ": ", b);
		assert(rc > 0);
		r += rc;
		/* "<=" intentionally includes the granule starting at e */
		for (saddr = b; saddr <= e; saddr += ASAN_BLOCK_SIZE) {
			int8_t *sbyte = va_to_shadow((void *)saddr);

			rc = snprintk(buf + r, sizeof(buf) - r,
				      "0x%02x ", (uint8_t)*sbyte);
			assert(rc > 0);
			r += rc;
		}
		EMSG("%s", buf);
	}
#endif
	EMSG("[ASAN]: access violation, addr: %#"PRIxVA" size: %zu",
	     addr, size);

	asan_panic_cb();
}
307
asan_shadow_1byte_isvalid(vaddr_t addr)308 static __always_inline bool asan_shadow_1byte_isvalid(vaddr_t addr)
309 {
310 int8_t last = (addr & ASAN_BLOCK_MASK) + 1;
311 int8_t *byte = va_to_shadow((void *)addr);
312
313 if (*byte == 0 || last <= *byte)
314 return true;
315
316 return false;
317 }
318
asan_shadow_2byte_isvalid(vaddr_t addr)319 static __always_inline bool asan_shadow_2byte_isvalid(vaddr_t addr)
320 {
321 if (addr_crosses_scale_boundary(addr, 2)) {
322 return (asan_shadow_1byte_isvalid(addr) &&
323 asan_shadow_1byte_isvalid(addr + 1));
324 } else {
325 int8_t last = ((addr + 1) & ASAN_BLOCK_MASK) + 1;
326 int8_t *byte = va_to_shadow((void *)addr);
327
328 if (*byte == 0 || last <= *byte)
329 return true;
330
331 return false;
332 }
333 }
334
asan_shadow_4byte_isvalid(vaddr_t addr)335 static __always_inline bool asan_shadow_4byte_isvalid(vaddr_t addr)
336 {
337 if (addr_crosses_scale_boundary(addr, 4)) {
338 return (asan_shadow_2byte_isvalid(addr) &&
339 asan_shadow_2byte_isvalid(addr + 2));
340 } else {
341 int8_t last = ((addr + 3) & ASAN_BLOCK_MASK) + 1;
342 int8_t *byte = va_to_shadow((void *)addr);
343
344 if (*byte == 0 || last <= *byte)
345 return true;
346
347 return false;
348 }
349 }
350
asan_shadow_8byte_isvalid(vaddr_t addr)351 static __always_inline bool asan_shadow_8byte_isvalid(vaddr_t addr)
352 {
353 if (addr_crosses_scale_boundary(addr, 8)) {
354 return (asan_shadow_4byte_isvalid(addr) &&
355 asan_shadow_4byte_isvalid(addr + 4));
356 } else {
357 int8_t last = ((addr + 7) & ASAN_BLOCK_MASK) + 1;
358 int8_t *byte = va_to_shadow((void *)addr);
359
360 if (*byte == 0 || last <= *byte)
361 return true;
362
363 return false;
364 }
365 }
366
asan_shadow_Nbyte_isvalid(vaddr_t addr,size_t size)367 static __always_inline bool asan_shadow_Nbyte_isvalid(vaddr_t addr,
368 size_t size)
369 {
370 size_t i = 0;
371
372 for (; i < size; i++) {
373 if (!asan_shadow_1byte_isvalid(addr + i))
374 return false;
375 }
376
377 return true;
378 }
379
/*
 * Validate an access of @size bytes at @addr against the shadow map and
 * report on violation. Kept __always_inline so __builtin_constant_p()
 * can resolve @size per call site and pick the fixed-size fast path.
 */
static __always_inline void check_access(vaddr_t addr, size_t size)
{
	bool valid = false;
	void *begin = (void *)addr;
	void *end = (void *)(addr + size);

	/* Checking is disabled until asan_start() runs */
	if (!asan_active)
		return;
	if (size == 0)
		return;
	/* Addresses with no registered shadow backing are not checked */
	if (va_range_outside_shadow(begin, end))
		return;
	/*
	 * If it isn't outside it has to be completely inside or there's a
	 * problem.
	 */
	if (!va_range_inside_shadow(begin, end))
		asan_panic();

	if (__builtin_constant_p(size)) {
		/* Fast paths for the common power-of-two access sizes */
		switch (size) {
		case 1:
			valid = asan_shadow_1byte_isvalid(addr);
			break;
		case 2:
			valid = asan_shadow_2byte_isvalid(addr);
			break;
		case 4:
			valid = asan_shadow_4byte_isvalid(addr);
			break;
		case 8:
			valid = asan_shadow_8byte_isvalid(addr);
			break;
		default:
			valid = asan_shadow_Nbyte_isvalid(addr, size);
			break;
		}
	} else {
		valid = asan_shadow_Nbyte_isvalid(addr, size);
	}

	if (!valid)
		asan_report(addr, size);
}
424
/* Loads are validated exactly like any other access */
static __always_inline void check_load(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}
429
/* Stores are validated exactly like any other access */
static __always_inline void check_store(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}
434
/*
 * Backend for __asan_report_load*_noabort: the compiler already decided
 * the load is invalid, so just terminate.
 */
static void __noreturn report_load(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
439
/*
 * Backend for __asan_report_store*_noabort: the compiler already decided
 * the store is invalid, so just terminate.
 */
static void __noreturn report_store(vaddr_t addr __unused, size_t size __unused)
{
	asan_panic();
}
444
445
446
/*
 * Emit the compiler-called instrumentation entry points for a fixed
 * (type, size) pair: __asan_<type><size>(), its _noabort twin (both
 * validate the access) and __asan_report_<type><size>_noabort() (panics
 * unconditionally). Prototypes precede the definitions to keep strict
 * missing-prototype warnings quiet.
 */
#define DEFINE_ASAN_FUNC(type, size) \
	void __asan_##type##size(vaddr_t addr); \
	void __asan_##type##size(vaddr_t addr) \
	{ check_##type(addr, size); } \
	void __asan_##type##size##_noabort(vaddr_t addr); \
	void __asan_##type##size##_noabort(vaddr_t addr) \
	{ check_##type(addr, size); } \
	void __asan_report_##type##size##_noabort(vaddr_t addr);\
	void __noreturn __asan_report_##type##size##_noabort(vaddr_t addr) \
	{ report_##type(addr, size); }

DEFINE_ASAN_FUNC(load, 1)
DEFINE_ASAN_FUNC(load, 2)
DEFINE_ASAN_FUNC(load, 4)
DEFINE_ASAN_FUNC(load, 8)
DEFINE_ASAN_FUNC(load, 16)
DEFINE_ASAN_FUNC(store, 1)
DEFINE_ASAN_FUNC(store, 2)
DEFINE_ASAN_FUNC(store, 4)
DEFINE_ASAN_FUNC(store, 8)
DEFINE_ASAN_FUNC(store, 16)
468
/* Variable-size load hook emitted by the compiler */
void __asan_loadN_noabort(vaddr_t addr, size_t size);
void __asan_loadN_noabort(vaddr_t addr, size_t size)
{
	check_load(addr, size);
}
474
/* Variable-size store hook emitted by the compiler */
void __asan_storeN_noabort(vaddr_t addr, size_t size);
void __asan_storeN_noabort(vaddr_t addr, size_t size)
{
	check_store(addr, size);
}
480
/* Variable-size load report hook: access already known invalid */
void __asan_report_load_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_load_n_noabort(vaddr_t addr, size_t size)
{
	report_load(addr, size);
}
486
/* Variable-size store report hook: access already known invalid */
void __asan_report_store_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_store_n_noabort(vaddr_t addr, size_t size)
{
	report_store(addr, size);
}
492
/*
 * Called by instrumented code before noreturn calls. Intentionally
 * empty here; stack shadow is repaired in asan_handle_longjmp() instead.
 */
void __asan_handle_no_return(void);
void __asan_handle_no_return(void)
{
}
497
void __asan_register_globals(struct asan_global *globals, size_t size);
/*
 * Module-constructor hook: for each instrumented global, unpoison its
 * payload and poison the trailing redzone. Globals that already lie in
 * a registered memory-pool region are left with their existing tagging.
 */
void __asan_register_globals(struct asan_global *globals, size_t size)
{
	size_t i = 0;

	for (i = 0; i < size; i++) {
		struct asan_global *g = globals + i;
		vaddr_t start = g->beg;
		vaddr_t stop = start + g->size;
		vaddr_t stop_align = ROUNDUP(stop, ASAN_BLOCK_SIZE);
		vaddr_t stop_rz = start + g->size_with_redzone;

		if (!mpool_exists_in_range(start, stop))
			asan_tag_access((void *)start, (void *)stop);
		asan_tag_no_access((void *)stop_align, (void *)stop_rz);
	}
}
DECLARE_KEEP_INIT(__asan_register_globals);
515
/* ASan ABI hook; intentionally a no-op — global tags are never reverted */
void __asan_unregister_globals(struct asan_global *globals, size_t size);
void __asan_unregister_globals(struct asan_global *globals __unused,
			       size_t size __unused)
{
}
521
/*
 * Repair the stack shadow after a longjmp(): the unwound frames never
 * ran their instrumented epilogues, so unpoison everything between the
 * current stack position (approximated by the address of a local,
 * rounded down to a granule) and the saved stack pointer @old_sp.
 */
void asan_handle_longjmp(void *old_sp)
{
	void *top = old_sp;
	void *bottom = (void *)ROUNDDOWN((vaddr_t)&top,
					 ASAN_BLOCK_SIZE);

	asan_tag_access(bottom, top);
}
530
531 #if !defined(__KERNEL__)
532
/*
 * Map the shadow range [lo, hi) as zero-initialized memory at exactly
 * the requested address and record it in s_regs[].
 * Returns 0 on success, -1 on table overflow or mapping failure.
 */
static int asan_map_shadow_region(vaddr_t lo, vaddr_t hi)
{
	struct asan_global_info *asan_info = GET_ASAN_INFO();
	TEE_Result rc = TEE_SUCCESS;
	size_t sz = hi - lo;
	vaddr_t req = lo;

	if (asan_info->s_regs_count >= ASAN_VA_REGS_MAX)
		return -1;

#if defined(__LDELF__)
	rc = _ldelf_map_zi(&req, sz, 0, 0, 0);
#else
	/*
	 * NOTE(review): rc stays TEE_SUCCESS in this branch; a
	 * tee_map_zi_va() failure is only caught by the req != lo check
	 * below — confirm it cannot return lo on failure.
	 */
	req = (vaddr_t)tee_map_zi_va(req, sz, 0);
#endif
	if (rc != TEE_SUCCESS)
		return -1;
	/* The shadow must land at the exact address the math demands */
	if (req != lo)
		return -1;

	asan_info->s_regs[asan_info->s_regs_count++] =
		(struct asan_va_reg){ lo, hi };

	return 0;
}
558
/*
 * Make sure shadow memory backing the user VA range [lo, hi) is mapped,
 * then register [lo, hi) as a shadowed region of @type.
 * Returns 0 on success (including "already mapped"), -1 for an empty or
 * otherwise invalid request; panics on an unmappable gap or an
 * inconsistent shadow-region layout.
 */
int asan_user_map_shadow(void *lo, void *hi, enum asan_va_reg_type type)
{
	vaddr_t lo_s = 0;
	vaddr_t hi_s = 0;
	int rc = 0;

	if (lo == hi)
		return -1;

	/* Shadow mappings are made in whole pages */
	lo_s = ROUNDDOWN((vaddr_t)va_to_shadow(lo), SMALL_PAGE_SIZE);
	hi_s = ROUNDUP((vaddr_t)va_to_shadow(hi), SMALL_PAGE_SIZE);

	if (lo_s >= hi_s)
		return -1;
	/*
	 * NOTE(review): refuses ranges reaching the ASan info block —
	 * presumably it sits above all shadowed VAs; confirm.
	 */
	if (hi >= (void *)GET_ASAN_INFO())
		return -1;

	/*
	 * Walk the already mapped shadow ranges and trim [lo_s, hi_s)
	 * down to the part that is still missing.
	 */
	for (size_t i = 0; i < GET_ASAN_INFO()->s_regs_count; i++) {
		vaddr_t reg_lo_s = GET_ASAN_INFO()->s_regs[i].lo;
		vaddr_t reg_hi_s = GET_ASAN_INFO()->s_regs[i].hi;

		if (reg_hi_s <= lo_s || reg_lo_s >= hi_s) {
			/*
			 * This mapped range does not intersect the
			 * requested range. Skip it.
			 */
			continue;
		}
		if (reg_lo_s <= lo_s && reg_hi_s >= hi_s) {
			/*
			 * The requested shadow range is already fully
			 * mapped, so there is nothing left to do.
			 */
			goto out;
		}
		if (reg_lo_s <= lo_s && reg_hi_s < hi_s) {
			/*
			 * The mapped range covers the left side of
			 * the requested range.
			 */
			lo_s = reg_hi_s;
			continue;
		}
		if (reg_lo_s > lo_s && reg_hi_s >= hi_s) {
			/*
			 * The mapped range covers the right side of
			 * the requested range.
			 */
			hi_s = reg_lo_s;
			continue;
		}
		/*
		 * If we are here then there is a problem, that shouldn't
		 * happen for valid shadow mapping intervals.
		 */
		EMSG("can't handle: reg_lo_s %#"PRIxVA" reg_hi_s %#"
		     PRIxVA" lo_s %#"PRIxVA" hi_s %#"PRIxVA, reg_lo_s,
		     reg_hi_s, lo_s, hi_s);
		asan_panic();
	}
	/*
	 * If we reach this point, [lo_s, hi_s) is the remaining shadow
	 * gap that still needs to be mapped.
	 */
	assert(hi_s > lo_s);
	rc = asan_map_shadow_region(lo_s, hi_s);
	if (rc) {
		EMSG("Failed to map shadow region");
		asan_panic();
	}
out:
	/* Remember the original VA range as checked by ASan. */
	asan_add_shadowed(lo, hi, type);
	return 0;
}
638
639 #else
640
/*
 * Kernel-build stub: no on-demand shadow mapping here — presumably the
 * kernel shadow is set up elsewhere at boot; confirm against the
 * kernel memory-map code.
 */
int asan_user_map_shadow(void *lo __unused, void *hi __unused,
			 enum asan_va_reg_type type __unused)
{
	return 0;
}
646 #endif
647