// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 */

/*
 * Kernel Address Sanitizer (ASan) runtime.
 *
 * One shadow byte tracks each ASAN_BLOCK_SIZE-sized block of the
 * shadowed region. As the tagging/checking code below shows: 0 means
 * the whole block is accessible, a small positive value N means only
 * the first N bytes of the block are accessible, and the
 * ASAN_DATA_RED_ZONE / ASAN_HEAP_RED_ZONE markers (defined in
 * kernel/asan.h, presumably negative so every check fails) deny all
 * access.
 */

#include <assert.h>
#include <compiler.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/panic.h>
#include <printk.h>
#include <setjmp.h>
#include <string.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

/*
 * GCC >= 7 emits version 7 of the global registration ABI, which adds
 * the odr_indicator field to struct asan_global.
 */
#if __GCC_VERSION >= 70000
#define ASAN_ABI_VERSION 7
#else
#define ASAN_ABI_VERSION 6
#endif

/* Source position the compiler records for an instrumented global */
struct asan_source_location {
	const char *file_name;
	int line_no;
	int column_no;
};

/* Compiler-emitted descriptor for one instrumented global variable */
struct asan_global {
	uintptr_t beg;
	uintptr_t size;
	uintptr_t size_with_redzone;
	const char *name;
	const char *module_name;
	uintptr_t has_dynamic_init;
	struct asan_source_location *location;
#if ASAN_ABI_VERSION >= 7
	uintptr_t odr_indicator;
#endif
};

/* Base and size of the virtual address range covered by the shadow map */
static vaddr_t asan_va_base;
static size_t asan_va_size;
/* Access checks are only performed once asan_start() has been called */
static bool asan_active;
/* Called on a detected violation; overridable via asan_set_panic_cb() */
static asan_panic_cb_t asan_panic_cb = asan_panic;

/*
 * Returns true if [addr, addr + size) straddles a shadow block
 * boundary (ASAN_BLOCK_SHIFT), i.e. is covered by more than one
 * shadow byte.
 */
static bool addr_crosses_scale_boundary(vaddr_t addr, size_t size)
{
	return (addr >> ASAN_BLOCK_SHIFT) !=
		((addr + size - 1) >> ASAN_BLOCK_SHIFT);
}

/* Maps an address to the shadow byte tracking its block */
static int8_t *va_to_shadow(const void *va)
{
	vaddr_t sa = ((vaddr_t)va / ASAN_BLOCK_SIZE) + CFG_ASAN_SHADOW_OFFSET;

	return (int8_t *)sa;
}

/* Number of shadow bytes covering the range [begin, end) */
static size_t va_range_to_shadow_size(const void *begin, const void *end)
{
	return ((vaddr_t)end - (vaddr_t)begin) / ASAN_BLOCK_SIZE;
}

/*
 * True if the non-empty range [begin, end) lies entirely inside the
 * shadowed region.
 */
static bool va_range_inside_shadow(const void *begin, const void *end)
{
	vaddr_t b = (vaddr_t)begin;
	vaddr_t e = (vaddr_t)end;

	/* Empty or inverted ranges are neither inside nor outside */
	if (b >= e)
		return false;
	return (b >= asan_va_base) && (e <= (asan_va_base + asan_va_size));
}

/*
 * True if the non-empty range [begin, end) lies entirely outside the
 * shadowed region.
 */
static bool va_range_outside_shadow(const void *begin, const void *end)
{
	vaddr_t b = (vaddr_t)begin;
	vaddr_t e = (vaddr_t)end;

	if (b >= e)
		return false;
	return (e <= asan_va_base) || (b >= (asan_va_base + asan_va_size));
}

/* Offset of va within its shadow block */
static size_t va_misalignment(const void *va)
{
	return (vaddr_t)va & ASAN_BLOCK_MASK;
}

/* True if va sits on a shadow block boundary */
static bool va_is_well_aligned(const void *va)
{
	return !va_misalignment(va);
}

/*
 * Records [begin, end) as the address range covered by the shadow map.
 * Must be called exactly once (asserted via asan_va_base still being
 * zero), with block-aligned bounds, before any tagging takes place.
 */
void asan_set_shadowed(const void *begin, const void *end)
{
	vaddr_t b = (vaddr_t)begin;
	vaddr_t e = (vaddr_t)end;

	assert(!asan_va_base);
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(b < e);

	asan_va_base = b;
	asan_va_size = e - b;
}

/* Marks the block-aligned range [begin, end) as a no-access red zone */
void asan_tag_no_access(const void *begin, const void *end)
{
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));
	assert(va_range_inside_shadow(begin, end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_DATA_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}

/*
 * Marks [begin, end) as accessible. begin must be block-aligned; a
 * non-aligned end is encoded by storing the count of accessible bytes
 * into the last, partially covered shadow byte.
 */
void asan_tag_access(const void *begin, const void *end)
{
	/* No-op before the shadow region is known, or for an empty range */
	if (!asan_va_base || (begin == end))
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));

	asan_memset_unchecked(va_to_shadow(begin), 0,
			      va_range_to_shadow_size(begin, end));
	if (!va_is_well_aligned(end))
		*va_to_shadow(end) = va_misalignment(end);
}

/* Marks the freed, block-aligned range [begin, end) as a heap red zone */
void asan_tag_heap_free(const void *begin, const void *end)
{
	if (!asan_va_base)
		return;

	assert(va_range_inside_shadow(begin, end));
	assert(va_is_well_aligned(begin));
	assert(va_is_well_aligned(end));

	asan_memset_unchecked(va_to_shadow(begin), ASAN_HEAP_RED_ZONE,
			      va_range_to_shadow_size(begin, end));
}

/*
 * memset() replacement the compiler is prevented from turning back
 * into a libcall, so shadow memory can be written without going
 * through instrumented code.
 */
__inhibit_loop_to_libcall void *asan_memset_unchecked(void *s, int c, size_t n)
{
	uint8_t *b = s;
	size_t m;

	for (m = 0; m < n; m++)
		b[m] = c;

	return s;
}

/* memcpy() replacement, same rationale as asan_memset_unchecked() */
__inhibit_loop_to_libcall
void *asan_memcpy_unchecked(void *__restrict dst, const void *__restrict src,
			    size_t len)
{
	uint8_t *__restrict d = dst;
	const uint8_t *__restrict s = src;
	size_t n;

	for (n = 0; n < len; n++)
		d[n] = s[n];

	return dst;
}

/* Enables access checking; the shadow region must be set up first */
void asan_start(void)
{
	assert(asan_va_base && !asan_active);
	asan_active = true;
}

/* Default violation handler */
void __noreturn asan_panic(void)
{
	panic();
}

/* Overrides the handler invoked when an access violation is detected */
void asan_set_panic_cb(asan_panic_cb_t panic_cb)
{
	asan_panic_cb = panic_cb;
}

/*
 * Reports an access violation at addr of the given size, optionally
 * dumping the shadow bytes around it, then invokes the panic callback
 * (which is expected not to return).
 */
static void asan_report(vaddr_t addr, size_t size)
{
#ifdef KASAN_DUMP_SHADOW
	char buf[128] = {0};
	int r = 0, rc = 0;
	vaddr_t b = 0, e = 0, saddr = 0;

	/* One shadow block before and after the faulting address */
	b = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) - ASAN_BLOCK_SIZE;
	e = ROUNDDOWN(addr, ASAN_BLOCK_SIZE) + ASAN_BLOCK_SIZE;

	/* Print shadow map nearby */
	if (va_range_inside_shadow((void *)b, (void *)e)) {
		rc = snprintk(buf + r, sizeof(buf) - r, "%lx: ", b);
		assert(rc > 0);
		r += rc;
		for (saddr = b; saddr <= e; saddr += ASAN_BLOCK_SIZE) {
			int8_t *sbyte = va_to_shadow((void *)saddr);

			rc = snprintk(buf + r, sizeof(buf) - r,
				      "0x%02x ", (uint8_t)*sbyte);
			assert(rc > 0);
			r += rc;
		}
		EMSG("%s", buf);
	}
#endif
	EMSG("[ASAN]: access violation, addr: %lx size: %zu\n",
	     addr, size);

	asan_panic_cb();
}

/*
 * True if a 1-byte access at addr is allowed: the shadow byte is 0
 * (whole block accessible) or at least the in-block offset of the
 * accessed byte plus one.
 */
static __always_inline bool asan_shadow_1byte_isvalid(vaddr_t addr)
{
	int8_t last = (addr & ASAN_BLOCK_MASK) + 1;
	int8_t *byte = va_to_shadow((void *)addr);

	if (*byte == 0 || last <= *byte)
		return true;

	return false;
}

/*
 * True if a 2-byte access at addr is allowed. An access that crosses
 * a shadow block boundary is split and each half checked separately;
 * otherwise the last accessed byte is checked against the single
 * covering shadow byte.
 */
static __always_inline bool asan_shadow_2byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 2)) {
		return (asan_shadow_1byte_isvalid(addr) &&
			asan_shadow_1byte_isvalid(addr + 1));
	} else {
		int8_t last = ((addr + 1) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}

static
/* True if a 4-byte access at addr is allowed, split on block crossings */
__always_inline bool asan_shadow_4byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 4)) {
		return (asan_shadow_2byte_isvalid(addr) &&
			asan_shadow_2byte_isvalid(addr + 2));
	} else {
		int8_t last = ((addr + 3) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}

/* True if an 8-byte access at addr is allowed, split on block crossings */
static __always_inline bool asan_shadow_8byte_isvalid(vaddr_t addr)
{
	if (addr_crosses_scale_boundary(addr, 8)) {
		return (asan_shadow_4byte_isvalid(addr) &&
			asan_shadow_4byte_isvalid(addr + 4));
	} else {
		int8_t last = ((addr + 7) & ASAN_BLOCK_MASK) + 1;
		int8_t *byte = va_to_shadow((void *)addr);

		if (*byte == 0 || last <= *byte)
			return true;

		return false;
	}
}

/* Byte-by-byte validity check for accesses of arbitrary size */
static __always_inline bool asan_shadow_Nbyte_isvalid(vaddr_t addr,
						      size_t size)
{
	size_t i = 0;

	for (; i < size; i++) {
		if (!asan_shadow_1byte_isvalid(addr + i))
			return false;
	}

	return true;
}

/*
 * Validates an access of the given size at addr against the shadow
 * map and reports a violation if it is not allowed. Checks are
 * skipped until asan_start() and for accesses entirely outside the
 * shadowed region; a range straddling the region's border panics.
 */
static __always_inline void check_access(vaddr_t addr, size_t size)
{
	bool valid = false;
	void *begin = (void *)addr;
	void *end = (void *)(addr + size);

	if (!asan_active)
		return;
	if (size == 0)
		return;
	if (va_range_outside_shadow(begin, end))
		return;
	/*
	 * If it isn't outside it has to be completely inside or there's a
	 * problem.
	 */
	if (!va_range_inside_shadow(begin, end))
		panic();

	/* Take the fixed-size fast path when the compiler knows the size */
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			valid = asan_shadow_1byte_isvalid(addr);
			break;
		case 2:
			valid = asan_shadow_2byte_isvalid(addr);
			break;
		case 4:
			valid = asan_shadow_4byte_isvalid(addr);
			break;
		case 8:
			valid = asan_shadow_8byte_isvalid(addr);
			break;
		default:
			valid = asan_shadow_Nbyte_isvalid(addr, size);
			break;
		}
	} else {
		valid = asan_shadow_Nbyte_isvalid(addr, size);
	}

	if (!valid)
		asan_report(addr, size);
}

static __always_inline void check_load(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}

static __always_inline void check_store(vaddr_t addr, size_t size)
{
	check_access(addr, size);
}

/* Report-only handlers: any direct call is treated as fatal */
static void __noreturn report_load(vaddr_t addr __unused, size_t size __unused)
{
	panic();
}

static void __noreturn report_store(vaddr_t addr __unused, size_t size __unused)
{
	panic();
}

/*
 * Expands to the fixed-size __asan_{load,store}<size>*() entry points
 * the instrumented code calls for memory accesses of that size.
 */
#define DEFINE_ASAN_FUNC(type, size) \
	void __asan_##type##size(vaddr_t addr); \
	void __asan_##type##size(vaddr_t addr) \
	{ check_##type(addr, size); } \
	void __asan_##type##size##_noabort(vaddr_t addr); \
	void __asan_##type##size##_noabort(vaddr_t addr) \
	{ check_##type(addr, size); } \
	void __asan_report_##type##size##_noabort(vaddr_t addr);\
	void __noreturn __asan_report_##type##size##_noabort(vaddr_t addr) \
	{ report_##type(addr, size); }

DEFINE_ASAN_FUNC(load, 1)
DEFINE_ASAN_FUNC(load, 2)
DEFINE_ASAN_FUNC(load, 4)
DEFINE_ASAN_FUNC(load, 8)
DEFINE_ASAN_FUNC(load, 16)
DEFINE_ASAN_FUNC(store, 1)
DEFINE_ASAN_FUNC(store, 2)
DEFINE_ASAN_FUNC(store, 4)
DEFINE_ASAN_FUNC(store, 8)
DEFINE_ASAN_FUNC(store, 16)

/* Variable-sized access entry points */
void __asan_loadN_noabort(vaddr_t addr, size_t size);
void __asan_loadN_noabort(vaddr_t addr, size_t size)
{
	check_load(addr, size);
}

void __asan_storeN_noabort(vaddr_t addr, size_t size);
void __asan_storeN_noabort(vaddr_t addr, size_t size)
{
	check_store(addr, size);
}

void __asan_report_load_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_load_n_noabort(vaddr_t addr, size_t size)
{
	report_load(addr, size);
}

void __asan_report_store_n_noabort(vaddr_t addr, size_t size);
void __noreturn __asan_report_store_n_noabort(vaddr_t addr, size_t size)
{
	report_store(addr, size);
}

/* ASan ABI hook for noreturn calls; intentionally a no-op here */
void __asan_handle_no_return(void);
void __asan_handle_no_return(void)
{
}

/*
 * Called by compiler-generated init code: tags each global variable
 * itself as accessible and the rest of its allocation (the trailing
 * red zone past the block-aligned end) as no-access.
 */
void __asan_register_globals(struct asan_global *globals, size_t size);
void __asan_register_globals(struct asan_global *globals, size_t size)
{
	size_t n = 0;

	for (n = 0; n < size; n++) {
		vaddr_t begin = globals[n].beg;
		vaddr_t end = begin + globals[n].size;
		vaddr_t end_align = ROUNDUP(end, ASAN_BLOCK_SIZE);
		vaddr_t end_rz = begin + globals[n].size_with_redzone;

		asan_tag_access((void *)begin, (void *)end);
		asan_tag_no_access((void *)end_align, (void *)end_rz);
	}
}
DECLARE_KEEP_INIT(__asan_register_globals);

/* Counterpart of __asan_register_globals(); nothing to undo here */
void __asan_unregister_globals(struct asan_global *globals, size_t size);
void __asan_unregister_globals(struct asan_global *globals __unused,
			       size_t size __unused)
{
}

/*
 * Re-tags as accessible the stack between the current frame (the
 * address of a local, rounded down to a block boundary) and old_sp,
 * the stack pointer restored by longjmp(). Presumably needed because
 * the frames skipped by the jump were never untagged — assumes a
 * descending stack, i.e. bottom <= top; confirm against the
 * setjmp/longjmp implementation.
 */
void asan_handle_longjmp(void *old_sp)
{
	void *top = old_sp;
	void *bottom = (void *)ROUNDDOWN((vaddr_t)&top,
					 ASAN_BLOCK_SIZE);

	asan_tag_access(bottom, top);
}