1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2014, STMicroelectronics International N.V. 4 * Copyright (c) 2022, Linaro Limited. 5 */ 6 7 #define PROTOTYPES 8 9 /* 10 * BGET CONFIGURATION 11 * ================== 12 */ 13 /* #define BGET_ENABLE_ALL_OPTIONS */ 14 #ifdef BGET_ENABLE_OPTION 15 #define TestProg 20000 /* Generate built-in test program 16 if defined. The value specifies 17 how many buffer allocation attempts 18 the test program should make. */ 19 #endif 20 21 22 #ifdef __LP64__ 23 #define SizeQuant 16 24 #endif 25 #ifdef __ILP32__ 26 #define SizeQuant 8 27 #endif 28 /* Buffer allocation size quantum: 29 all buffers allocated are a 30 multiple of this size. This 31 MUST be a power of two. */ 32 33 #ifdef BGET_ENABLE_OPTION 34 #define BufDump 1 /* Define this symbol to enable the 35 bpoold() function which dumps the 36 buffers in a buffer pool. */ 37 38 #define BufValid 1 /* Define this symbol to enable the 39 bpoolv() function for validating 40 a buffer pool. */ 41 42 #define DumpData 1 /* Define this symbol to enable the 43 bufdump() function which allows 44 dumping the contents of an allocated 45 or free buffer. */ 46 47 #define BufStats 1 /* Define this symbol to enable the 48 bstats() function which calculates 49 the total free space in the buffer 50 pool, the largest available 51 buffer, and the total space 52 currently allocated. */ 53 54 #define FreeWipe 1 /* Wipe free buffers to a guaranteed 55 pattern of garbage to trip up 56 miscreants who attempt to use 57 pointers into released buffers. */ 58 59 #define BestFit 1 /* Use a best fit algorithm when 60 searching for space for an 61 allocation request. This uses 62 memory more efficiently, but 63 allocation will be much slower. */ 64 65 #define BECtl 1 /* Define this symbol to enable the 66 bectl() function for automatic 67 pool space control. */ 68 #endif 69 70 #ifdef MEM_DEBUG 71 #undef NDEBUG 72 #define DumpData 1 73 #define BufValid 1 74 #define FreeWipe 1 75 #endif 76 77 #ifdef CFG_WITH_STATS 78 #define BufStats 1 79 #endif 80 81 #include <compiler.h> 82 #include <config.h> 83 #include <malloc.h> 84 #include <memtag.h> 85 #include <stdbool.h> 86 #include <stdint.h> 87 #include <stdlib_ext.h> 88 #include <stdlib.h> 89 #include <string.h> 90 #include <trace.h> 91 #include <util.h> 92 93 #if defined(__KERNEL__) 94 /* Compiling for TEE Core */ 95 #include <kernel/asan.h> 96 #include <kernel/spinlock.h> 97 #include <kernel/unwind.h> 98 99 static void *memset_unchecked(void *s, int c, size_t n) 100 { 101 return asan_memset_unchecked(s, c, n); 102 } 103 104 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src, 105 size_t n) 106 { 107 return asan_memcpy_unchecked(dst, src, n); 108 } 109 110 #else /*__KERNEL__*/ 111 /* Compiling for TA */ 112 113 static void *memset_unchecked(void *s, int c, size_t n) 114 { 115 return memset(s, c, n); 116 } 117 118 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src, 119 size_t n) 120 { 121 return memcpy(dst, src, n); 122 } 123 124 #endif /*__KERNEL__*/ 125 126 #include "bget.c" /* this is ugly, but this is bget */ 127 128 struct malloc_pool { 129 void *buf; 130 size_t len; 131 }; 132 133 struct malloc_ctx { 134 struct bpoolset poolset; 135 struct malloc_pool *pool; 136 size_t pool_len; 137 #ifdef BufStats 138 struct malloc_stats mstats; 139 #endif 140 #ifdef __KERNEL__ 141 unsigned int spinlock; 142 #endif 143 }; 144 145 #ifdef __KERNEL__ 146 147 static uint32_t malloc_lock(struct malloc_ctx *ctx) 148 { 149 return cpu_spin_lock_xsave(&ctx->spinlock); 150 } 151 152 static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions) 153 { 154 cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions); 155 } 156 157 #else /* __KERNEL__ */ 158 159 static uint32_t malloc_lock(struct malloc_ctx *ctx __unused) 160 { 161 return 0; 162 } 163 164 static void malloc_unlock(struct malloc_ctx *ctx __unused, 165 uint32_t exceptions __unused) 166 { 167 } 168 169 #endif /* __KERNEL__ */ 170 171 #define DEFINE_CTX(name) struct malloc_ctx name = \ 172 { .poolset = { .freelist = { {0, 0}, \ 173 {&name.poolset.freelist, \ 174 &name.poolset.freelist}}}} 175 176 static DEFINE_CTX(malloc_ctx); 177 178 #ifdef CFG_VIRTUALIZATION 179 static __nex_data DEFINE_CTX(nex_malloc_ctx); 180 #endif 181 182 static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused) 183 { 184 #if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM) 185 EMSG("Memory allocation failed: size %zu context %p", req_size, ctx); 186 print_kernel_stack(); 187 #endif 188 } 189 190 /* Most of the stuff in this function is copied from bgetr() in bget.c */ 191 static __maybe_unused bufsize bget_buf_size(void *buf) 192 { 193 bufsize osize; /* Old size of buffer */ 194 struct bhead *b; 195 196 b = BH(((char *)buf) - sizeof(struct bhead)); 197 osize = -b->bsize; 198 #ifdef BECtl 199 if (osize == 0) { 200 /* Buffer acquired directly through acqfcn. */ 201 struct bdhead *bd; 202 203 bd = BDH(((char *)buf) - sizeof(struct bdhead)); 204 osize = bd->tsize - sizeof(struct bdhead) - bd->offs; 205 } else 206 #endif 207 osize -= sizeof(struct bhead); 208 assert(osize > 0); 209 return osize; 210 } 211 212 static void *maybe_tag_buf(void *buf, size_t __maybe_unused requested_size) 213 { 214 if (!buf) 215 return NULL; 216 217 COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant); 218 219 if (MEMTAG_IS_ENABLED) { 220 size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE); 221 222 /* 223 * Allocated buffer can be larger than requested when 224 * allocating with memalign(), but we should never tag more 225 * than allocated. 226 */ 227 assert(bget_buf_size(buf) >= sz); 228 return memtag_set_random_tags(buf, sz); 229 } 230 231 #if defined(__KERNEL__) 232 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) 233 asan_tag_access(buf, (uint8_t *)buf + requested_size); 234 #endif 235 return buf; 236 } 237 238 static void *maybe_untag_buf(void *buf) 239 { 240 if (!buf) 241 return NULL; 242 243 if (MEMTAG_IS_ENABLED) { 244 size_t sz = 0; 245 246 memtag_assert_tag(buf); /* Trying to catch double free early */ 247 sz = bget_buf_size(memtag_strip_tag(buf)); 248 return memtag_set_tags(buf, sz, 0); 249 } 250 251 #if defined(__KERNEL__) 252 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) 253 asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf)); 254 #endif 255 return buf; 256 } 257 258 static void *strip_tag(void *buf) 259 { 260 if (MEMTAG_IS_ENABLED) 261 return memtag_strip_tag(buf); 262 return buf; 263 } 264 265 static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused) 266 { 267 #if defined(__KERNEL__) 268 asan_tag_heap_free(buf, (uint8_t *)buf + len); 269 #endif 270 } 271 272 #ifdef BufStats 273 274 static void *raw_malloc_return_hook(void *p, size_t requested_size, 275 struct malloc_ctx *ctx) 276 { 277 if (ctx->poolset.totalloc > ctx->mstats.max_allocated) 278 ctx->mstats.max_allocated = ctx->poolset.totalloc; 279 280 if (!p) { 281 ctx->mstats.num_alloc_fail++; 282 print_oom(requested_size, ctx); 283 if (requested_size > ctx->mstats.biggest_alloc_fail) { 284 ctx->mstats.biggest_alloc_fail = requested_size; 285 ctx->mstats.biggest_alloc_fail_used = 286 ctx->poolset.totalloc; 287 } 288 } 289 290 return maybe_tag_buf(p, MAX(SizeQuant, requested_size)); 291 } 292 293 static void gen_malloc_reset_stats(struct malloc_ctx *ctx) 294 { 295 uint32_t exceptions = malloc_lock(ctx); 296 297 ctx->mstats.max_allocated = 0; 298 ctx->mstats.num_alloc_fail = 0; 299 ctx->mstats.biggest_alloc_fail = 0; 300 ctx->mstats.biggest_alloc_fail_used = 0; 301 malloc_unlock(ctx, exceptions); 302 } 303 304 void malloc_reset_stats(void) 305 { 306 gen_malloc_reset_stats(&malloc_ctx); 307 } 308 309 static void gen_malloc_get_stats(struct malloc_ctx *ctx, 310 struct malloc_stats *stats) 311 { 312 uint32_t exceptions = malloc_lock(ctx); 313 314 memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats)); 315 stats->allocated = ctx->poolset.totalloc; 316 malloc_unlock(ctx, exceptions); 317 } 318 319 void malloc_get_stats(struct malloc_stats *stats) 320 { 321 gen_malloc_get_stats(&malloc_ctx, stats); 322 } 323 324 #else /* BufStats */ 325 326 static void *raw_malloc_return_hook(void *p, size_t requested_size, 327 struct malloc_ctx *ctx ) 328 { 329 if (!p) 330 print_oom(requested_size, ctx); 331 332 return maybe_tag_buf(p, MAX(SizeQuant, requested_size)); 333 } 334 335 #endif /* BufStats */ 336 337 #ifdef BufValid 338 static void raw_malloc_validate_pools(struct malloc_ctx *ctx) 339 { 340 size_t n; 341 342 for (n = 0; n < ctx->pool_len; n++) 343 bpoolv(ctx->pool[n].buf); 344 } 345 #else 346 static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused) 347 { 348 } 349 #endif 350 351 struct bpool_iterator { 352 struct bfhead *next_buf; 353 size_t pool_idx; 354 }; 355 356 static void bpool_foreach_iterator_init(struct malloc_ctx *ctx, 357 struct bpool_iterator *iterator) 358 { 359 iterator->pool_idx = 0; 360 iterator->next_buf = BFH(ctx->pool[0].buf); 361 } 362 363 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf, 364 size_t *len, bool *isfree) 365 { 366 struct bfhead *b = iterator->next_buf; 367 bufsize bs = b->bh.bsize; 368 369 if (bs == ESent) 370 return false; 371 372 if (bs < 0) { 373 /* Allocated buffer */ 374 bs = -bs; 375 376 *isfree = false; 377 } else { 378 /* Free Buffer */ 379 *isfree = true; 380 381 /* Assert that the free list links are intact */ 382 assert(b->ql.blink->ql.flink == b); 383 assert(b->ql.flink->ql.blink == b); 384 } 385 386 *buf = (uint8_t *)b + sizeof(struct bhead); 387 *len = bs - sizeof(struct bhead); 388 389 iterator->next_buf = BFH((uint8_t *)b + bs); 390 return true; 391 } 392 393 static bool bpool_foreach(struct malloc_ctx *ctx, 394 struct bpool_iterator *iterator, void **buf) 395 { 396 while (true) { 397 size_t len; 398 bool isfree; 399 400 if (bpool_foreach_pool(iterator, buf, &len, &isfree)) { 401 if (isfree) 402 continue; 403 return true; 404 } 405 406 if ((iterator->pool_idx + 1) >= ctx->pool_len) 407 return false; 408 409 iterator->pool_idx++; 410 iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf); 411 } 412 } 413 414 /* Convenience macro for looping over all allocated buffers */ 415 #define BPOOL_FOREACH(ctx, iterator, bp) \ 416 for (bpool_foreach_iterator_init((ctx),(iterator)); \ 417 bpool_foreach((ctx),(iterator), (bp));) 418 419 void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment, 420 size_t pl_size, struct malloc_ctx *ctx) 421 { 422 void *ptr = NULL; 423 bufsize s; 424 425 if (!alignment || !IS_POWER_OF_TWO(alignment)) 426 return NULL; 427 428 raw_malloc_validate_pools(ctx); 429 430 /* Compute total size, excluding the header */ 431 if (ADD_OVERFLOW(pl_size, ftr_size, &s)) 432 goto out; 433 434 /* BGET doesn't like 0 sized allocations */ 435 if (!s) 436 s++; 437 438 ptr = bget(alignment, hdr_size, s, &ctx->poolset); 439 out: 440 return raw_malloc_return_hook(ptr, pl_size, ctx); 441 } 442 443 void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size, 444 struct malloc_ctx *ctx) 445 { 446 /* 447 * Note that we're feeding SizeQ as alignment, this is the smallest 448 * alignment that bget() can use. 449 */ 450 return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx); 451 } 452 453 void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe) 454 { 455 raw_malloc_validate_pools(ctx); 456 457 if (ptr) 458 brel(maybe_untag_buf(ptr), &ctx->poolset, wipe); 459 } 460 461 void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb, 462 size_t pl_size, struct malloc_ctx *ctx) 463 { 464 void *ptr = NULL; 465 bufsize s; 466 467 raw_malloc_validate_pools(ctx); 468 469 /* Compute total size, excluding hdr_size */ 470 if (MUL_OVERFLOW(pl_nmemb, pl_size, &s)) 471 goto out; 472 if (ADD_OVERFLOW(s, ftr_size, &s)) 473 goto out; 474 475 /* BGET doesn't like 0 sized allocations */ 476 if (!s) 477 s++; 478 479 ptr = bgetz(0, hdr_size, s, &ctx->poolset); 480 out: 481 return raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx); 482 } 483 484 void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size, 485 size_t pl_size, struct malloc_ctx *ctx) 486 { 487 void *p = NULL; 488 bufsize s; 489 490 /* Compute total size */ 491 if (ADD_OVERFLOW(pl_size, hdr_size, &s)) 492 goto out; 493 if (ADD_OVERFLOW(s, ftr_size, &s)) 494 goto out; 495 496 raw_malloc_validate_pools(ctx); 497 498 /* BGET doesn't like 0 sized allocations */ 499 if (!s) 500 s++; 501 502 p = bget(0, 0, s, &ctx->poolset); 503 504 if (p && ptr) { 505 void *old_ptr = maybe_untag_buf(ptr); 506 bufsize old_sz = bget_buf_size(old_ptr); 507 508 if (old_sz < s) { 509 memcpy(p, old_ptr, old_sz); 510 #ifndef __KERNEL__ 511 /* User space reallocations are always zeroed */ 512 memset((uint8_t *)p + old_sz, 0, s - old_sz); 513 #endif 514 } else { 515 memcpy(p, old_ptr, s); 516 } 517 518 brel(old_ptr, &ctx->poolset, false /*!wipe*/); 519 } 520 out: 521 return raw_malloc_return_hook(p, pl_size, ctx); 522 } 523 524 #ifdef ENABLE_MDBG 525 526 struct mdbg_hdr { 527 const char *fname; 528 uint16_t line; 529 uint32_t pl_size; 530 uint32_t magic; 531 #if defined(ARM64) 532 uint64_t pad; 533 #endif 534 }; 535 536 #define MDBG_HEADER_MAGIC 0xadadadad 537 #define MDBG_FOOTER_MAGIC 0xecececec 538 539 static size_t mdbg_get_ftr_size(size_t pl_size) 540 { 541 size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size; 542 543 return ftr_pad + sizeof(uint32_t); 544 } 545 546 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr) 547 { 548 uint32_t *footer; 549 550 footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size + 551 mdbg_get_ftr_size(hdr->pl_size)); 552 footer--; 553 return footer; 554 } 555 556 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname, 557 int lineno, size_t pl_size) 558 { 559 uint32_t *footer; 560 561 hdr->fname = fname; 562 hdr->line = lineno; 563 hdr->pl_size = pl_size; 564 hdr->magic = MDBG_HEADER_MAGIC; 565 566 footer = mdbg_get_footer(hdr); 567 *footer = MDBG_FOOTER_MAGIC; 568 } 569 570 static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname, 571 int lineno, size_t size) 572 { 573 struct mdbg_hdr *hdr; 574 uint32_t exceptions = malloc_lock(ctx); 575 576 /* 577 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM. 578 */ 579 COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0); 580 581 hdr = raw_malloc(sizeof(struct mdbg_hdr), 582 mdbg_get_ftr_size(size), size, ctx); 583 if (hdr) { 584 mdbg_update_hdr(hdr, fname, lineno, size); 585 hdr++; 586 } 587 588 malloc_unlock(ctx, exceptions); 589 return hdr; 590 } 591 592 static void assert_header(struct mdbg_hdr *hdr __maybe_unused) 593 { 594 assert(hdr->magic == MDBG_HEADER_MAGIC); 595 assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC); 596 } 597 598 static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe) 599 { 600 struct mdbg_hdr *hdr = ptr; 601 602 if (hdr) { 603 hdr--; 604 assert_header(hdr); 605 hdr->magic = 0; 606 *mdbg_get_footer(hdr) = 0; 607 raw_free(hdr, ctx, wipe); 608 } 609 } 610 611 static void free_helper(void *ptr, bool wipe) 612 { 613 uint32_t exceptions = malloc_lock(&malloc_ctx); 614 615 gen_mdbg_free(&malloc_ctx, ptr, wipe); 616 malloc_unlock(&malloc_ctx, exceptions); 617 } 618 619 static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname, int lineno, 620 size_t nmemb, size_t size) 621 { 622 struct mdbg_hdr *hdr; 623 uint32_t exceptions = malloc_lock(ctx); 624 625 hdr = raw_calloc(sizeof(struct mdbg_hdr), 626 mdbg_get_ftr_size(nmemb * size), nmemb, size, 627 ctx); 628 if (hdr) { 629 mdbg_update_hdr(hdr, fname, lineno, nmemb * size); 630 hdr++; 631 } 632 malloc_unlock(ctx, exceptions); 633 return hdr; 634 } 635 636 static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx, const char *fname, 637 int lineno, void *ptr, size_t size) 638 { 639 struct mdbg_hdr *hdr = ptr; 640 641 if (hdr) { 642 hdr--; 643 assert_header(hdr); 644 } 645 hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr), 646 mdbg_get_ftr_size(size), size, ctx); 647 if (hdr) { 648 mdbg_update_hdr(hdr, fname, lineno, size); 649 hdr++; 650 } 651 return hdr; 652 } 653 654 static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname, 655 int lineno, void *ptr, size_t size) 656 { 657 void *p; 658 uint32_t exceptions = malloc_lock(ctx); 659 660 p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size); 661 malloc_unlock(ctx, exceptions); 662 return p; 663 } 664 665 #define realloc_unlocked(ctx, ptr, size) \ 666 gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size)) 667 668 static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname, 669 int lineno, size_t alignment, size_t size) 670 { 671 struct mdbg_hdr *hdr; 672 uint32_t exceptions = malloc_lock(ctx); 673 674 hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size), 675 alignment, size, ctx); 676 if (hdr) { 677 mdbg_update_hdr(hdr, fname, lineno, size); 678 hdr++; 679 } 680 malloc_unlock(ctx, exceptions); 681 return hdr; 682 } 683 684 685 static void *get_payload_start_size(void *raw_buf, size_t *size) 686 { 687 struct mdbg_hdr *hdr = raw_buf; 688 689 assert(bget_buf_size(hdr) >= hdr->pl_size); 690 *size = hdr->pl_size; 691 return hdr + 1; 692 } 693 694 static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump) 695 { 696 struct bpool_iterator itr; 697 void *b; 698 uint32_t exceptions = malloc_lock(ctx); 699 700 raw_malloc_validate_pools(ctx); 701 702 BPOOL_FOREACH(ctx, &itr, &b) { 703 struct mdbg_hdr *hdr = (struct mdbg_hdr *)b; 704 705 assert_header(hdr); 706 707 if (bufdump > 0) { 708 const char *fname = hdr->fname; 709 710 if (!fname) 711 fname = "unknown"; 712 713 IMSG("buffer: %d bytes %s:%d\n", 714 hdr->pl_size, fname, hdr->line); 715 } 716 } 717 718 malloc_unlock(ctx, exceptions); 719 } 720 721 void *mdbg_malloc(const char *fname, int lineno, size_t size) 722 { 723 return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size); 724 } 725 726 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 727 { 728 return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size); 729 } 730 731 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 732 { 733 return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size); 734 } 735 736 void *mdbg_memalign(const char *fname, int lineno, size_t alignment, 737 size_t size) 738 { 739 return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size); 740 } 741 742 #if __STDC_VERSION__ >= 201112L 743 void *mdbg_aligned_alloc(const char *fname, int lineno, size_t alignment, 744 size_t size) 745 { 746 if (size % alignment) 747 return NULL; 748 749 return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size); 750 } 751 #endif /* __STDC_VERSION__ */ 752 753 void mdbg_check(int bufdump) 754 { 755 gen_mdbg_check(&malloc_ctx, bufdump); 756 } 757 758 /* 759 * Since malloc debug is enabled, malloc() and friends are redirected by macros 760 * to mdbg_malloc() etc. 761 * We still want to export the standard entry points in case they are referenced 762 * by the application, either directly or via external libraries. 763 */ 764 #undef malloc 765 void *malloc(size_t size) 766 { 767 return mdbg_malloc(__FILE__, __LINE__, size); 768 } 769 770 #undef calloc 771 void *calloc(size_t nmemb, size_t size) 772 { 773 return mdbg_calloc(__FILE__, __LINE__, nmemb, size); 774 } 775 776 #undef realloc 777 void *realloc(void *ptr, size_t size) 778 { 779 return mdbg_realloc(__FILE__, __LINE__, ptr, size); 780 } 781 782 #else /* ENABLE_MDBG */ 783 784 void *malloc(size_t size) 785 { 786 void *p; 787 uint32_t exceptions = malloc_lock(&malloc_ctx); 788 789 p = raw_malloc(0, 0, size, &malloc_ctx); 790 malloc_unlock(&malloc_ctx, exceptions); 791 return p; 792 } 793 794 static void free_helper(void *ptr, bool wipe) 795 { 796 uint32_t exceptions = malloc_lock(&malloc_ctx); 797 798 raw_free(ptr, &malloc_ctx, wipe); 799 malloc_unlock(&malloc_ctx, exceptions); 800 } 801 802 void *calloc(size_t nmemb, size_t size) 803 { 804 void *p; 805 uint32_t exceptions = malloc_lock(&malloc_ctx); 806 807 p = raw_calloc(0, 0, nmemb, size, &malloc_ctx); 808 malloc_unlock(&malloc_ctx, exceptions); 809 return p; 810 } 811 812 static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr, 813 size_t size) 814 { 815 return raw_realloc(ptr, 0, 0, size, ctx); 816 } 817 818 void *realloc(void *ptr, size_t size) 819 { 820 void *p; 821 uint32_t exceptions = malloc_lock(&malloc_ctx); 822 823 p = realloc_unlocked(&malloc_ctx, ptr, size); 824 malloc_unlock(&malloc_ctx, exceptions); 825 return p; 826 } 827 828 void *memalign(size_t alignment, size_t size) 829 { 830 void *p; 831 uint32_t exceptions = malloc_lock(&malloc_ctx); 832 833 p = raw_memalign(0, 0, alignment, size, &malloc_ctx); 834 malloc_unlock(&malloc_ctx, exceptions); 835 return p; 836 } 837 838 #if __STDC_VERSION__ >= 201112L 839 void *aligned_alloc(size_t alignment, size_t size) 840 { 841 if (size % alignment) 842 return NULL; 843 844 return memalign(alignment, size); 845 } 846 #endif /* __STDC_VERSION__ */ 847 848 static void *get_payload_start_size(void *ptr, size_t *size) 849 { 850 *size = bget_buf_size(ptr); 851 return ptr; 852 } 853 854 #endif 855 856 void free(void *ptr) 857 { 858 free_helper(ptr, false); 859 } 860 861 void free_wipe(void *ptr) 862 { 863 free_helper(ptr, true); 864 } 865 866 static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 867 { 868 void *p; 869 size_t l; 870 uint32_t exceptions; 871 uintptr_t start = (uintptr_t)buf; 872 uintptr_t end = start + len; 873 const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead); 874 875 start = ROUNDUP(start, SizeQuant); 876 end = ROUNDDOWN(end, SizeQuant); 877 878 if (start > end || (end - start) < min_len) { 879 DMSG("Skipping too small pool"); 880 return; 881 } 882 883 /* First pool requires a bigger size */ 884 if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) { 885 DMSG("Skipping too small initial pool"); 886 return; 887 } 888 889 exceptions = malloc_lock(ctx); 890 891 tag_asan_free((void *)start, end - start); 892 bpool((void *)start, end - start, &ctx->poolset); 893 l = ctx->pool_len + 1; 894 p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l); 895 assert(p); 896 ctx->pool = p; 897 ctx->pool[ctx->pool_len].buf = (void *)start; 898 ctx->pool[ctx->pool_len].len = end - start; 899 #ifdef BufStats 900 ctx->mstats.size += ctx->pool[ctx->pool_len].len; 901 #endif 902 ctx->pool_len = l; 903 malloc_unlock(ctx, exceptions); 904 } 905 906 static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx, 907 void *buf, size_t len) 908 { 909 struct bpool_iterator itr; 910 void *b; 911 uint8_t *start_buf = strip_tag(buf); 912 uint8_t *end_buf = start_buf + len; 913 bool ret = false; 914 uint32_t exceptions = malloc_lock(ctx); 915 916 raw_malloc_validate_pools(ctx); 917 918 /* Check for wrapping */ 919 if (start_buf > end_buf) 920 goto out; 921 922 BPOOL_FOREACH(ctx, &itr, &b) { 923 uint8_t *start_b; 924 uint8_t *end_b; 925 size_t s; 926 927 start_b = get_payload_start_size(b, &s); 928 end_b = start_b + s; 929 930 if (start_buf >= start_b && end_buf <= end_b) { 931 ret = true; 932 goto out; 933 } 934 } 935 936 out: 937 malloc_unlock(ctx, exceptions); 938 939 return ret; 940 } 941 942 static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx, 943 void *buf, size_t len) 944 { 945 uintptr_t buf_start = (uintptr_t) buf; 946 uintptr_t buf_end = buf_start + len; 947 size_t n; 948 bool ret = false; 949 uint32_t exceptions = malloc_lock(ctx); 950 951 raw_malloc_validate_pools(ctx); 952 953 for (n = 0; n < ctx->pool_len; n++) { 954 uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf; 955 uintptr_t pool_end = pool_start + ctx->pool[n].len; 956 957 if (buf_start > buf_end || pool_start > pool_end) { 958 ret = true; /* Wrapping buffers, shouldn't happen */ 959 goto out; 960 } 961 962 if (buf_end > pool_start || buf_start < pool_end) { 963 ret = true; 964 goto out; 965 } 966 } 967 968 out: 969 malloc_unlock(ctx, exceptions); 970 return ret; 971 } 972 973 size_t raw_malloc_get_ctx_size(void) 974 { 975 return sizeof(struct malloc_ctx); 976 } 977 978 void raw_malloc_init_ctx(struct malloc_ctx *ctx) 979 { 980 memset(ctx, 0, sizeof(*ctx)); 981 ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist; 982 ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist; 983 } 984 985 void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 986 { 987 gen_malloc_add_pool(ctx, buf, len); 988 } 989 990 #ifdef CFG_WITH_STATS 991 void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats) 992 { 993 gen_malloc_get_stats(ctx, stats); 994 } 995 #endif 996 997 void malloc_add_pool(void *buf, size_t len) 998 { 999 gen_malloc_add_pool(&malloc_ctx, buf, len); 1000 } 1001 1002 bool malloc_buffer_is_within_alloced(void *buf, size_t len) 1003 { 1004 return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len); 1005 } 1006 1007 bool malloc_buffer_overlaps_heap(void *buf, size_t len) 1008 { 1009 return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len); 1010 } 1011 1012 #ifdef CFG_VIRTUALIZATION 1013 1014 #ifndef ENABLE_MDBG 1015 1016 void *nex_malloc(size_t size) 1017 { 1018 void *p; 1019 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1020 1021 p = raw_malloc(0, 0, size, &nex_malloc_ctx); 1022 malloc_unlock(&nex_malloc_ctx, exceptions); 1023 return p; 1024 } 1025 1026 void *nex_calloc(size_t nmemb, size_t size) 1027 { 1028 void *p; 1029 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1030 1031 p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx); 1032 malloc_unlock(&nex_malloc_ctx, exceptions); 1033 return p; 1034 } 1035 1036 void *nex_realloc(void *ptr, size_t size) 1037 { 1038 void *p; 1039 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1040 1041 p = realloc_unlocked(&nex_malloc_ctx, ptr, size); 1042 malloc_unlock(&nex_malloc_ctx, exceptions); 1043 return p; 1044 } 1045 1046 void *nex_memalign(size_t alignment, size_t size) 1047 { 1048 void *p; 1049 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1050 1051 p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx); 1052 malloc_unlock(&nex_malloc_ctx, exceptions); 1053 return p; 1054 } 1055 1056 void nex_free(void *ptr) 1057 { 1058 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1059 1060 raw_free(ptr, &nex_malloc_ctx, false /* !wipe */); 1061 malloc_unlock(&nex_malloc_ctx, exceptions); 1062 } 1063 1064 #else /* ENABLE_MDBG */ 1065 1066 void *nex_mdbg_malloc(const char *fname, int lineno, size_t size) 1067 { 1068 return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size); 1069 } 1070 1071 void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 1072 { 1073 return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size); 1074 } 1075 1076 void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 1077 { 1078 return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size); 1079 } 1080 1081 void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment, 1082 size_t size) 1083 { 1084 return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment, size); 1085 } 1086 1087 void nex_mdbg_check(int bufdump) 1088 { 1089 gen_mdbg_check(&nex_malloc_ctx, bufdump); 1090 } 1091 1092 void nex_free(void *ptr) 1093 { 1094 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1095 1096 gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */); 1097 malloc_unlock(&nex_malloc_ctx, exceptions); 1098 } 1099 1100 #endif /* ENABLE_MDBG */ 1101 1102 void nex_malloc_add_pool(void *buf, size_t len) 1103 { 1104 gen_malloc_add_pool(&nex_malloc_ctx, buf, len); 1105 } 1106 1107 bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len) 1108 { 1109 return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len); 1110 } 1111 1112 bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len) 1113 { 1114 return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len); 1115 } 1116 1117 #ifdef BufStats 1118 1119 void nex_malloc_reset_stats(void) 1120 { 1121 gen_malloc_reset_stats(&nex_malloc_ctx); 1122 } 1123 1124 void nex_malloc_get_stats(struct malloc_stats *stats) 1125 { 1126 gen_malloc_get_stats(&nex_malloc_ctx, stats); 1127 } 1128 1129 #endif 1130 1131 #endif 1132