1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, Linaro Limited 4 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net 5 */ 6 7 #include <asan.h> 8 #include <assert.h> 9 #include <compiler.h> 10 #include <keep.h> 11 #include <kernel/panic.h> 12 #include <printk.h> 13 #include <setjmp.h> 14 #include <string.h> 15 #include <trace.h> 16 #include <types_ext.h> 17 #include <util.h> 18 19 #if __GCC_VERSION >= 70000 20 #define ASAN_ABI_VERSION 7 21 #else 22 #define ASAN_ABI_VERSION 6 23 #endif 24 25 struct asan_source_location { 26 const char *file_name; 27 int line_no; 28 int column_no; 29 }; 30 31 struct asan_global { 32 uintptr_t beg; 33 uintptr_t size; 34 uintptr_t size_with_redzone; 35 const char *name; 36 const char *module_name; 37 uintptr_t has_dynamic_init; 38 struct asan_source_location *location; 39 #if ASAN_ABI_VERSION >= 7 40 uintptr_t odr_indicator; 41 #endif 42 }; 43 44 static struct asan_global_info __asan_global_info; 45 46 static bool asan_active; 47 static asan_panic_cb_t asan_panic_cb = asan_panic; 48 49 static bool addr_crosses_scale_boundary(vaddr_t addr, size_t size) 50 { 51 return (addr >> ASAN_BLOCK_SHIFT) != 52 ((addr + size - 1) >> ASAN_BLOCK_SHIFT); 53 } 54 55 static int8_t *va_to_shadow(const void *va) 56 { 57 vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) + CFG_ASAN_SHADOW_OFFSET; 58 59 return (int8_t *)sa; 60 } 61 62 static size_t va_range_to_shadow_size(const void *begin, const void *end) 63 { 64 return ((vaddr_t)end - (vaddr_t)begin) / ASAN_BLOCK_SIZE; 65 } 66 67 static bool va_range_inside_shadow(const void *begin, const void *end) 68 { 69 struct asan_va_reg *regs = GET_ASAN_INFO()->regs; 70 vaddr_t b = (vaddr_t)begin; 71 vaddr_t e = (vaddr_t)end; 72 unsigned int i = 0; 73 74 if (b >= e) 75 return false; 76 77 for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) { 78 if (b >= regs[i].lo && e <= regs[i].hi) { 79 /* Access is covered fully by at least one region */ 80 return true; 81 } 82 } 83 84 return false; 85 } 86 87 
/*
 * Return true if the non-empty range [begin, end) overlaps none of the
 * registered shadowed regions. Note that an empty or inverted range is
 * reported as NOT outside (false), mirroring va_range_inside_shadow().
 */
static bool va_range_outside_shadow(const void *begin, const void *end)
{
	struct asan_va_reg *regs = GET_ASAN_INFO()->regs;
	vaddr_t b = (vaddr_t)begin;
	vaddr_t e = (vaddr_t)end;
	unsigned int i = 0;

	if (b >= e)
		return false;

	for (i = 0; i < GET_ASAN_INFO()->regs_count; i++) {
		if (b < regs[i].hi && e > regs[i].lo) {
			/* Access covers region at least partly */
			return false;
		}
	}

	return true;
}

/* Byte offset of @va within its shadow granule (0 when granule-aligned) */
static size_t va_misalignment(const void *va)
{
	return (vaddr_t)va & ASAN_BLOCK_MASK;
}

/* True when @va sits exactly on a shadow-granule boundary */
static bool va_is_well_aligned(const void *va)
{
	return !va_misalignment(va);
}

/*
 * Register [begin, end) as a region covered by shadow memory. Both ends
 * must be granule-aligned and the range non-empty. Panics when the fixed
 * region table (ASAN_VA_REGS_MAX entries) is already full.
 */
void asan_add_shadowed(const void *begin, const void *end)
{
	struct asan_va_reg reg = {(vaddr_t)begin, (vaddr_t)end};
	struct asan_global_info *asan_info = GET_ASAN_INFO();

	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(reg.lo < reg.hi);
	if (asan_info->regs_count < ASAN_VA_REGS_MAX) {
		asan_info->regs[asan_info->regs_count++] = reg;
	} else {
		EMSG("No free regions to allocate");
		asan_panic();
	}
}

/*
 * Poison [begin, end): fill the corresponding shadow bytes with
 * ASAN_DATA_RED_ZONE so any access is reported. Range must be
 * granule-aligned and inside a registered region.
 */
void asan_tag_no_access(const void *begin, const void *end)
{
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(va_range_inside_shadow(begin, end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_DATA_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}

/*
 * Unpoison [begin, end): zero the shadow bytes for all full granules.
 * If @end is not granule-aligned, the shadow byte of the trailing partial
 * granule is set to the number of valid leading bytes in that granule
 * (standard ASan shadow encoding). No-op before any region is registered
 * or for an empty range.
 */
void asan_tag_access(const void *begin, const void *end)
{
	if (!GET_ASAN_INFO()->regs_count || begin == end)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));

	asan_memset_unchecked(va_to_shadow(begin), 0,
			      va_range_to_shadow_size(begin, end));
	if (!va_is_well_aligned(end))
		*va_to_shadow(end) = va_misalignment(end);
}

/*
 * Poison a freed heap range [begin, end) with ASAN_HEAP_RED_ZONE so
 * use-after-free is reported. Both ends must be granule-aligned.
 * No-op before any region is registered.
 */
void asan_tag_heap_free(const void *begin, const void *end)
{
	if (!GET_ASAN_INFO()->regs_count)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_HEAP_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}

/*
 * memset() variant used to write shadow memory itself. The explicit byte
 * loop plus __inhibit_loop_to_libcall keeps the compiler from replacing
 * it with a call to the (instrumented) libc memset.
 */
__inhibit_loop_to_libcall void *asan_memset_unchecked(void *s, int c, size_t n)
{
	uint8_t *b = s;
	size_t m;

	for (m = 0; m < n; m++)
		b[m] = c;

	return s;
}

/*
 * memcpy() variant that bypasses instrumentation; see
 * asan_memset_unchecked() for why the loop must not become a libcall.
 * Source and destination must not overlap (__restrict).
 */
__inhibit_loop_to_libcall
void *asan_memcpy_unchecked(void *__restrict dst, const void *__restrict src,
			    size_t len)
{
	uint8_t *__restrict d = dst;
	const uint8_t *__restrict s = src;
	size_t n;

	for (n = 0; n < len; n++)
		d[n] = s[n];

	return dst;
}

/*
 * Enable access checking. At least one shadowed region must have been
 * registered first, and asan_start() must only be called once.
 */
void asan_start(void)
{
	assert(GET_ASAN_INFO()->regs_count > 0 && !asan_active);
	asan_active = true;
}

/* Default violation handler: halt the system */
void __noreturn asan_panic(void)
{
	panic();
}

/* Override the handler invoked by asan_report() on a violation */
void asan_set_panic_cb(asan_panic_cb_t panic_cb)
{
	asan_panic_cb = panic_cb;
}

/*
 * Report an invalid access at @addr of @size bytes, then invoke the panic
 * callback (asan_panic() by default, which does not return). With
 * KASAN_DUMP_SHADOW the shadow bytes of the neighbouring granules are
 * logged first to aid debugging.
 */
static void asan_report(vaddr_t addr, size_t size)
{
#ifdef KASAN_DUMP_SHADOW
	char buf[128] = {0};
	int r = 0, rc = 0;
	vaddr_t b = 0, e = 0, saddr = 0;

	b = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) - ASAN_BLOCK_SIZE;
	e = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) + ASAN_BLOCK_SIZE;

	/* Print shadow map nearby */
	if (va_range_inside_shadow((void *)b, (void *)e)) {
		rc = snprintk(buf + r, sizeof(buf) - r, "%lx: ", b);
		assert(rc > 0);
		r += rc;
		for (saddr = b; saddr <= e; saddr += ASAN_BLOCK_SIZE) {
			int8_t *sbyte = va_to_shadow((void *)saddr);

			rc = snprintk(buf + r, sizeof(buf) - r,
				      "0x%02x ", (uint8_t)*sbyte);
			assert(rc > 0);
			r += rc;
		}
		EMSG("%s", buf);
	}
#endif
	EMSG("[ASAN]: access violation, addr: %lx size: %zu\n",
	     addr, size);

	asan_panic_cb();
}

/*
 * Shadow encoding: a shadow byte of 0 means the whole granule is
 * accessible; a positive value k means only the first k bytes are;
 * negative values are redzones. @last is the 1-based index of the
 * access's final byte within its granule, so the access is valid iff
 * the shadow byte is 0 or >= last.
 */
static __always_inline bool asan_shadow_1byte_isvalid(vaddr_t addr)
{
	int8_t last = (addr & ASAN_BLOCK_MASK) + 1;
	int8_t *byte = va_to_shadow((void *)addr);

	if (*byte == 0 || last <= *byte)
		return true;

	return false;
}

/* 2-byte access check; splits in two when it straddles a granule */
static __always_inline bool asan_shadow_2byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 2)) {
		return (asan_shadow_1byte_isvalid(addr) &&
			asan_shadow_1byte_isvalid(addr + 1));
	} else {
		int8_t last = ((addr + 1) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}

/* 4-byte access check; splits in two when it straddles a granule */
static __always_inline bool asan_shadow_4byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 4)) {
		return (asan_shadow_2byte_isvalid(addr) &&
			asan_shadow_2byte_isvalid(addr + 2));
	} else {
		int8_t last = ((addr + 3) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}

/* 8-byte access check; splits in two when it straddles a granule */
static __always_inline bool asan_shadow_8byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 8)) {
		return (asan_shadow_4byte_isvalid(addr) &&
			asan_shadow_4byte_isvalid(addr + 4));
	} else {
		int8_t last = ((addr + 7) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}

/* Generic check for arbitrary sizes: validate each byte in turn */
static __always_inline bool asan_shadow_Nbyte_isvalid(vaddr_t addr,
						      size_t size)
{
	size_t i = 0;

	for (; i < size; i++) {
		if (!asan_shadow_1byte_isvalid(addr + i))
			return false;
	}

	return true;
}

/*
 * Core check invoked from every instrumented load/store. Accesses are
 * ignored while ASan is inactive, for zero size, or when the range lies
 * entirely outside the shadowed regions. A range straddling a region
 * boundary is a hard error (panic). The __builtin_constant_p() switch
 * lets constant-size accesses use the specialized fast paths.
 */
static __always_inline void check_access(vaddr_t addr, size_t size)
{
	bool valid = false;
	void *begin = (void *)addr;
	void *end = (void *)(addr + size);

	if (!asan_active)
		return;
	if (size == 0)
		return;
	if (va_range_outside_shadow(begin, end))
		return;
	/*
	 * If it isn't outside it has to be completely inside or there's a
	 * problem.
	 */
	if (!va_range_inside_shadow(begin, end))
		panic();

	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = asan_shadow_1byte_isvalid(addr);
			break;
		case 2:
			valid = asan_shadow_2byte_isvalid(addr);
			break;
		case 4:
			valid = asan_shadow_4byte_isvalid(addr);
			break;
		case 8:
			valid = asan_shadow_8byte_isvalid(addr);
			break;
		default:
			valid = asan_shadow_Nbyte_isvalid(addr, size);
			break;
		}
	} else {
		valid = asan_shadow_Nbyte_isvalid(addr, size);
	}

	if (!valid)
		asan_report(addr, size);
}

/* Load-side wrapper; currently identical to check_access() */
static __always_inline void check_load(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}

/* Store-side wrapper; currently identical to check_access() */
static __always_inline void check_store(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}

/* Backend for the compiler's __asan_report_load* entry points */
static void __noreturn report_load(vaddr_t addr __unused, size_t size __unused)
{
	panic();
}

/* Backend for the compiler's __asan_report_store* entry points */
static void __noreturn report_store(vaddr_t addr __unused, size_t size __unused)
{
	panic();
}

/*
 * Generate the fixed-size compiler-ABI entry points for one access type
 * and size: __asan_<type><size>(), its _noabort variant and the
 * corresponding __asan_report_* function. Prototypes precede the
 * definitions to satisfy -Wmissing-prototypes.
 */
#define DEFINE_ASAN_FUNC(type, size)					\
	void __asan_##type##size(vaddr_t addr);				\
	void __asan_##type##size(vaddr_t addr)				\
	{ check_##type(addr, size); }					\
	void __asan_##type##size##_noabort(vaddr_t addr);		\
	void __asan_##type##size##_noabort(vaddr_t addr)		\
	{ check_##type(addr, size); }					\
	void __asan_report_##type##size##_noabort(vaddr_t addr);	\
	void __noreturn __asan_report_##type##size##_noabort(vaddr_t addr) \
	{ report_##type(addr, size); }

DEFINE_ASAN_FUNC(load, 1)
DEFINE_ASAN_FUNC(load, 2)
DEFINE_ASAN_FUNC(load, 4)
DEFINE_ASAN_FUNC(load, 8)
DEFINE_ASAN_FUNC(load, 16)
DEFINE_ASAN_FUNC(store, 1)
DEFINE_ASAN_FUNC(store, 2)
DEFINE_ASAN_FUNC(store, 4)
DEFINE_ASAN_FUNC(store, 8)
DEFINE_ASAN_FUNC(store, 16)

/* Variable-size load check, called by compiler-instrumented code */
void __asan_loadN_noabort(vaddr_t addr, size_t size);
void __asan_loadN_noabort(vaddr_t addr, size_t size)
{
	check_load(addr, size);
}

/* Variable-size store check, called by compiler-instrumented code */
void __asan_storeN_noabort(vaddr_t addr, size_t size);
void __asan_storeN_noabort(vaddr_t addr, size_t size)
{
	check_store(addr, size);
}

/* Variable-size load violation report (compiler ABI) */
void __asan_report_load_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_load_n_noabort(vaddr_t addr, size_t size)
{
	report_load(addr, size);
}

/* Variable-size store violation report (compiler ABI) */
void __asan_report_store_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_store_n_noabort(vaddr_t addr, size_t size)
{
	report_store(addr, size);
}

/*
 * Called by instrumented code before noreturn functions; intentionally a
 * no-op here (no per-thread fake-stack state to drop).
 */
void __asan_handle_no_return(void);
void __asan_handle_no_return(void)
{
}

/*
 * Called from compiler-generated constructors with the table of
 * instrumented globals. Each global's payload is unpoisoned (including
 * the shadow encoding of a trailing partial granule) and the aligned
 * remainder of its redzone is poisoned.
 */
void __asan_register_globals(struct asan_global *globals, size_t size);
void __asan_register_globals(struct asan_global *globals, size_t size)
{
	size_t n = 0;

	for (n = 0; n < size; n++) {
		vaddr_t begin = globals[n].beg;
		vaddr_t end = begin + globals[n].size;
		vaddr_t end_align = ROUNDUP(end, ASAN_BLOCK_SIZE);
		vaddr_t end_rz = begin + globals[n].size_with_redzone;

		asan_tag_access((void *)begin, (void *)end);
		asan_tag_no_access((void *)end_align, (void *)end_rz);
	}
}
DECLARE_KEEP_INIT(__asan_register_globals);

/* Counterpart of __asan_register_globals(); nothing to undo here */
void __asan_unregister_globals(struct asan_global *globals, size_t size);
void __asan_unregister_globals(struct asan_global *globals __unused,
			       size_t size __unused)
{
}

/*
 * After a longjmp() the stack between the current stack pointer
 * (approximated by the address of a local) and the restored @old_sp is
 * abandoned; re-tag it as accessible so stale redzones left by the
 * skipped frames don't trigger false positives.
 */
void asan_handle_longjmp(void *old_sp)
{
	void *top = old_sp;
	void *bottom = (void *)ROUNDDOWN((vaddr_t)&top,
					 ASAN_BLOCK_SIZE);

	asan_tag_access(bottom, top);
}