// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2022, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg 20000		/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */
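/*
 * For example, with SizeQuant 16 a request for 20 bytes is rounded up
 * to 32 bytes inside bget(), so every buffer address and size stays
 * aligned to the quantum.
 */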
#ifdef BGET_ENABLE_OPTION
#define BufDump 1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid 1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData 1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats 1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe 1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit 1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl 1			/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <pta_stats.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct pta_stats_alloc mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_NS_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
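/*
 * The helpers below implement the memory tagging (e.g. Arm MTE) and
 * ASAN hooks. With tagging enabled, maybe_tag_buf() colors the header
 * plus the payload (rounded up to the tag granule) with a random tag,
 * and maybe_untag_buf() retags the buffer with tag 0 on free, so a
 * sketch of an allocation's lifetime is:
 *
 *	p = malloc(20);		p[0..31] get a random tag T (a 16-byte
 *				granule is assumed here)
 *	load/store through p	OK, the pointer carries tag T
 *	free(p); use p again	tag mismatch -> hardware fault
 */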
static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz + hdr_size);
		return memtag_set_random_tags(buf, sz + hdr_size);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_access(buf, buf + hdr_size + requested_size);
#endif
	return buf;
}

static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}

static void *strip_tag(void *buf)
{
	if (MEMTAG_IS_ENABLED)
		return memtag_strip_tag(buf);
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size));
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct pta_stats_alloc *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_get_stats(ctx, stats);
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size));
}

#endif /* BufStats */
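/*
 * Usage sketch for the stats interface above (hypothetical caller;
 * needs BufStats, i.e. CFG_WITH_STATS):
 *
 *	struct pta_stats_alloc st = { };
 *
 *	malloc_get_stats(&st);
 *
 * st.allocated then holds the heap space currently in use and
 * st.max_allocated the high-water mark since the last
 * malloc_reset_stats().
 */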
#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp) \
	for (bpool_foreach_iterator_init((ctx), (iterator)); \
	     bpool_foreach((ctx), (iterator), (bp));)
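/*
 * For example, mirroring how gen_mdbg_check() uses the macro further
 * down:
 *
 *	struct bpool_iterator itr = { };
 *	void *b = NULL;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		inspect_buffer(b);	(inspect_buffer() is hypothetical)
 *
 * Free buffers are skipped by bpool_foreach(), so the loop body only
 * sees allocated buffers.
 */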
void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, hdr_size, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, hdr_size, pl_nmemb * pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bget(0, 0, s, &ctx->poolset);

	if (p && ptr) {
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);

		if (old_sz < s) {
			memcpy_unchecked(p, old_ptr, old_sz);
#ifndef __KERNEL__
			/* User space reallocations are always zeroed */
			memset_unchecked((uint8_t *)p + old_sz, 0, s - old_sz);
#endif
		} else {
			memcpy_unchecked(p, old_ptr, s);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	return raw_malloc_return_hook(p, hdr_size, pl_size, ctx);
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
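/*
 * Layout of a debug allocation (illustrative):
 *
 *	[struct mdbg_hdr][payload, pl_size bytes][pad][uint32_t footer]
 *
 * The header records the allocation site (fname:line) and carries
 * MDBG_HEADER_MAGIC; mdbg_get_ftr_size() pads pl_size up to a uint32_t
 * boundary so the MDBG_FOOTER_MAGIC word lands on an aligned address
 * just past the payload. Both magics are checked on free and by
 * mdbg_check().
 */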
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return strip_tag(footer);
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
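/*
 * Walks every allocated buffer in the context, verifying the header and
 * footer magics, and with bufdump > 0 also logs each live allocation,
 * e.g. (illustrative output, path and numbers made up):
 *
 *	buffer: 64 bytes lib/libfoo/bar.c:123
 */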
static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

#if __STDC_VERSION__ >= 201112L
void *mdbg_aligned_alloc(const char *fname, int lineno, size_t alignment,
			 size_t size)
{
	if (size % alignment)
		return NULL;

	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}
#endif /* __STDC_VERSION__ */

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

#if __STDC_VERSION__ >= 201112L
void *aligned_alloc(size_t alignment, size_t size)
{
	if (size % alignment)
		return NULL;

	return memalign(alignment, size);
}
#endif /* __STDC_VERSION__ */

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif /* ENABLE_MDBG */

void free(void *ptr)
{
	free_helper(ptr, false);
}
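/*
 * Like free(), but asks brel() to wipe the buffer contents on release.
 * Intended for payloads that may hold sensitive data such as keys, so
 * the plaintext doesn't linger in the free list.
 */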
void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}

static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_add_pool(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);
	bool ret = false;

	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	void *p = NULL;
	size_t l = 0;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
}
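/*
 * Usage sketch for a separate heap context (hypothetical caller; the
 * pool size is a placeholder):
 *
 *	struct malloc_ctx *c = <raw_malloc_get_ctx_size() bytes of storage>;
 *	static uint8_t pool_buf[64 * 1024];
 *
 *	raw_malloc_init_ctx(c);
 *	raw_malloc_add_pool(c, pool_buf, sizeof(pool_buf));
 *	p = raw_malloc(0, 0, 128, c);
 *	...
 *	raw_free(p, c, false);
 *
 * Note that the first pool added must be at least
 * MALLOC_INITIAL_POOL_MIN_SIZE bytes after SizeQuant alignment, or it
 * is rejected with a debug message.
 */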
bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
				     void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)strip_tag(buf);
	uintptr_t buf_end = buf_start + len;
	size_t n = 0;

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)strip_tag(ctx->pool[n].buf);
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end)
			return true;	/* Wrapping buffers, shouldn't happen */

		if ((buf_start >= pool_start && buf_start < pool_end) ||
		    (buf_end > pool_start && buf_end < pool_end))
			return true;
	}

	return false;
}

bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
					 void *buf, size_t len)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		return false;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b = NULL;
		uint8_t *end_b = NULL;
		size_t s = 0;

		start_b = strip_tag(get_payload_start_size(b, &s));
		end_b = start_b + s;
		if (start_buf >= start_b && end_buf <= end_b)
			return true;
	}

	return false;
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx,
			  struct pta_stats_alloc *stats)
{
	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}
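/*
 * With CFG_NS_VIRTUALIZATION the nex_* entry points below serve a
 * second, "nexus" heap (nex_malloc_ctx above, placed in __nex_data)
 * that holds state shared across all guest partitions, while the
 * regular heap is instantiated per partition. Apart from the backing
 * context, the two sets of functions behave identically.
 */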
#ifdef CFG_NS_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif /* BufStats */

#endif /* CFG_NS_VIRTUALIZATION */