// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2022, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif
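/*
 * Illustrative note (added, not from the original source): with the
 * quanta above every payload is rounded up to a multiple of SizeQuant,
 * so on a hypothetical LP64 build
 *
 *	malloc(20);
 *
 * consumes at least 32 bytes of payload plus the struct bhead
 * bookkeeping header that bget.c places in front of each buffer.
 */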
#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif
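/*
 * Added sketch for clarity: DEFINE_CTX statically builds the empty
 * circular free list that raw_malloc_init_ctx() near the end of this
 * file constructs at run time, i.e. it is equivalent to
 *
 *	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
 *	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
 *
 * so a context is usable (as an empty heap) before any pool is added.
 */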
static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

static void *maybe_tag_buf(void *buf, size_t __maybe_unused requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz);
		return memtag_set_random_tags(buf, sz);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_access(buf, (uint8_t *)buf + requested_size);
#endif
	return buf;
}

static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}

static void *strip_tag(void *buf)
{
	if (MEMTAG_IS_ENABLED)
		return memtag_strip_tag(buf);
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

#endif /* BufStats */
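/*
 * Illustrative sketch (added; assumes BufStats is enabled): taking a
 * heap usage snapshot.
 *
 *	struct malloc_stats stats = { };
 *
 *	malloc_get_stats(&stats);
 *
 * stats.allocated then holds the currently allocated byte count and
 * stats.max_allocated the high watermark, matching the fields updated
 * by raw_malloc_return_hook() above.
 */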
#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)			\
	for (bpool_foreach_iterator_init((ctx), (iterator));	\
	     bpool_foreach((ctx), (iterator), (bp));)
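/*
 * Illustrative sketch (added): counting the allocated buffers in a
 * context, mirroring how gen_mdbg_check() below walks the pools. Free
 * buffers are skipped by bpool_foreach().
 *
 *	struct bpool_iterator itr = { };
 *	void *b = NULL;
 *	size_t count = 0;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		count++;
 */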
void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bget(0, 0, s, &ctx->poolset);

	if (p && ptr) {
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);

		if (old_sz < s) {
			memcpy(p, old_ptr, old_sz);
#ifndef __KERNEL__
			/* User space reallocations are always zeroed */
			memset((uint8_t *)p + old_sz, 0, s - old_sz);
#endif
		} else {
			memcpy(p, old_ptr, s);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	return raw_malloc_return_hook(p, pl_size, ctx);
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
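/*
 * Added illustration: with malloc debugging enabled each allocation is
 * laid out as
 *
 *	[struct mdbg_hdr][payload (pl_size bytes)][pad][uint32_t footer]
 *
 * where the pad brings the footer to 32-bit alignment. The header magic
 * is MDBG_HEADER_MAGIC and the footer magic MDBG_FOOTER_MAGIC, so both
 * underruns and overruns are caught by assert_header() below.
 */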
static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size)			\
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}
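/*
 * Added note: a debug build can call mdbg_check(1) to validate every
 * allocation's header and footer and log each live buffer, producing
 * lines of the form "buffer: 32 bytes core/foo.c:123" (the path here is
 * hypothetical; file name and line are recorded at allocation time).
 */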
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external
 * libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
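/*
 * Added note: free_wipe() is meant for buffers that held sensitive
 * data; its wipe flag makes raw_free() ask brel() to overwrite the
 * buffer before returning it to the pool. A minimal sketch:
 *
 *	uint8_t *key = malloc(32);
 *
 *	... use key ...
 *	free_wipe(key);
 */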
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		/*
		 * Overlap requires both conditions to hold; an || here
		 * would flag nearly every buffer as overlapping.
		 */
		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	gen_malloc_add_pool(ctx, buf, len);
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats)
{
	gen_malloc_get_stats(ctx, stats);
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}
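/*
 * Illustrative sketch (symbol names are hypothetical): during early
 * init a platform would typically hand its heap region to the
 * allocator with
 *
 *	malloc_add_pool(heap_base, heap_size);
 *
 * gen_malloc_add_pool() above trims the region to SizeQuant alignment,
 * so a slightly unaligned region only loses its edges. Further pools
 * may be added later; only the first must be at least
 * MALLOC_INITIAL_POOL_MIN_SIZE bytes.
 */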
#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif /* CFG_VIRTUALIZATION */
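/*
 * Added note (summary, not from the original file): with
 * CFG_VIRTUALIZATION enabled the nex_* entry points above mirror the
 * standard ones but operate on nex_malloc_ctx (declared __nex_data
 * earlier), keeping a heap separate from the per-partition malloc()
 * heap.
 */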