// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2022, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg 20000		/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */
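/*
 * Illustration (an assumption about bget's internals, not something shown
 * in this file): bget rounds every request up to a multiple of SizeQuant
 * before carving it out of a pool, so on an __LP64__ build a malloc(20)
 * occupies 32 bytes of payload plus bget's per-buffer header. Similarly,
 * raw_malloc_return_hook() below treats SizeQuant as the smallest payload
 * it will tag.
 */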
#ifdef BGET_ENABLE_OPTION
#define BufDump 1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid 1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData 1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats 1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe 1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit 1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl 1			/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"	/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */
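/*
 * DEFINE_CTX() below statically initializes a context so that its free list
 * is an empty, self-linked doubly linked list: the {0, 0} pair is the bhead
 * of the list head and the two pointers are its flink/blink links, both
 * pointing back at the list head itself. This mirrors what
 * raw_malloc_init_ctx() does at runtime, so the contexts defined here are
 * usable before any explicit initialization. A rough sketch of what
 * DEFINE_CTX(malloc_ctx) is equivalent to:
 *
 *	struct malloc_ctx malloc_ctx = {
 *		.poolset.freelist.ql.flink = &malloc_ctx.poolset.freelist,
 *		.poolset.freelist.ql.blink = &malloc_ctx.poolset.freelist,
 *	};
 */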
#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
				     {&name.poolset.freelist,	\
				      &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

static void *maybe_tag_buf(void *buf, size_t __maybe_unused requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz);
		return memtag_set_random_tags(buf, sz);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_access(buf, (uint8_t *)buf + requested_size);
#endif
	return buf;
}

static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}

static void *strip_tag(void *buf)
{
	if (MEMTAG_IS_ENABLED)
		return memtag_strip_tag(buf);
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_get_stats(ctx, stats);
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}
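/*
 * Example (sketch, not part of this file): a caller built with BufStats
 * can snapshot heap usage around a suspicious operation to spot leaks.
 * Field names follow struct malloc_stats as used above (allocated,
 * max_allocated, ...); run_suspect_code() is hypothetical.
 *
 *	struct malloc_stats before = { };
 *	struct malloc_stats after = { };
 *
 *	malloc_get_stats(&before);
 *	run_suspect_code();
 *	malloc_get_stats(&after);
 *	assert(after.allocated == before.allocated);
 */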
#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)		     \
	for (bpool_foreach_iterator_init((ctx), (iterator)); \
	     bpool_foreach((ctx), (iterator), (bp));)
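/*
 * Example (sketch): counting the allocated buffers of a context, following
 * the same pattern as gen_mdbg_check() and
 * raw_malloc_buffer_is_within_alloced() further down.
 *
 *	struct bpool_iterator itr = { };
 *	void *b = NULL;
 *	size_t count = 0;
 *
 *	BPOOL_FOREACH(&malloc_ctx, &itr, &b)
 *		count++;
 */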
void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bget(0, 0, s, &ctx->poolset);

	if (p && ptr) {
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);

		if (old_sz < s) {
			memcpy(p, old_ptr, old_sz);
#ifndef __KERNEL__
			/* User space reallocations are always zeroed */
			memset((uint8_t *)p + old_sz, 0, s - old_sz);
#endif
		} else {
			memcpy(p, old_ptr, s);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	return raw_malloc_return_hook(p, pl_size, ctx);
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}
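/*
 * Sketch of an mdbg allocation as implied by mdbg_get_ftr_size() and
 * mdbg_get_footer():
 *
 *	[ struct mdbg_hdr ][ payload (pl_size) ][ pad ][ uint32_t footer ]
 *
 * raw_malloc() returns a pointer to the mdbg_hdr, gen_mdbg_malloc() and
 * friends hand out hdr + 1 (the start of the payload), and the footer
 * magic occupies the last four bytes. A 5-byte payload, for instance, gets
 * 3 bytes of padding so the footer lands on a 4-byte boundary.
 */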
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}
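/*
 * Example (sketch): with ENABLE_MDBG, a leak hunt can be run from any
 * debug hook by dumping every live allocation together with the file and
 * line that allocated it:
 *
 *	mdbg_check(1);	// walks the heap, asserts the header/footer magics
 *			// and IMSG()s "buffer: <size> <file>:<line>"
 */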
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

#if __STDC_VERSION__ >= 201112L
void *mdbg_aligned_alloc(const char *fname, int lineno, size_t alignment,
			 size_t size)
{
	if (size % alignment)
		return NULL;

	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}
#endif /* __STDC_VERSION__ */

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc. We still want to export the standard entry
 * points in case they are referenced by the application, either directly
 * or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

#if __STDC_VERSION__ >= 201112L
void *aligned_alloc(size_t alignment, size_t size)
{
	if (size % alignment)
		return NULL;

	return memalign(alignment, size);
}
#endif /* __STDC_VERSION__ */

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
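/*
 * Example (sketch): free_wipe() passes wipe=true down to raw_free()/brel()
 * so the buffer contents are cleared before the memory is recycled, which
 * is the variant to use for buffers that held secrets. The buffer name is
 * hypothetical.
 *
 *	uint8_t *session_key = malloc(32);
 *	...
 *	free_wipe(session_key);
 */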
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_add_pool(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);
	bool ret = false;

	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	void *p = NULL;
	size_t l = 0;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
}
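/*
 * Example (sketch): handing a memory region to the heap via the public
 * wrapper malloc_add_pool() further down. The start and end of the region
 * are trimmed to SizeQuant above, and the first pool registered must be at
 * least MALLOC_INITIAL_POOL_MIN_SIZE bytes. The buffer name and size are
 * hypothetical.
 *
 *	static uint8_t heap[64 * 1024];
 *
 *	malloc_add_pool(heap, sizeof(heap));
 */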
bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
				     void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)buf;
	uintptr_t buf_end = buf_start + len;
	size_t n = 0;

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end)
			return true;	/* Wrapping buffers, shouldn't happen */

		if ((buf_start >= pool_start && buf_start < pool_end) ||
		    (buf_end >= pool_start && buf_end < pool_end))
			return true;
	}

	return false;
}

bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
					 void *buf, size_t len)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		return false;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b = NULL;
		uint8_t *end_b = NULL;
		size_t s = 0;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;
		if (start_buf >= start_b && end_buf <= end_b)
			return true;
	}

	return false;
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats)
{
	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}
#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif