1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2014, STMicroelectronics International N.V. 4 * Copyright (c) 2015-2025, Linaro Limited. 5 */ 6 7 #define PROTOTYPES 8 9 /* 10 * BGET CONFIGURATION 11 * ================== 12 */ 13 /* #define BGET_ENABLE_ALL_OPTIONS */ 14 #ifdef BGET_ENABLE_OPTION 15 #define TestProg 20000 /* Generate built-in test program 16 if defined. The value specifies 17 how many buffer allocation attempts 18 the test program should make. */ 19 #endif 20 21 22 #ifdef __LP64__ 23 #define SizeQuant 16 24 #endif 25 #ifdef __ILP32__ 26 #define SizeQuant 8 27 #endif 28 /* Buffer allocation size quantum: 29 all buffers allocated are a 30 multiple of this size. This 31 MUST be a power of two. */ 32 33 #ifdef BGET_ENABLE_OPTION 34 #define BufDump 1 /* Define this symbol to enable the 35 bpoold() function which dumps the 36 buffers in a buffer pool. */ 37 38 #define BufValid 1 /* Define this symbol to enable the 39 bpoolv() function for validating 40 a buffer pool. */ 41 42 #define DumpData 1 /* Define this symbol to enable the 43 bufdump() function which allows 44 dumping the contents of an allocated 45 or free buffer. */ 46 47 #define BufStats 1 /* Define this symbol to enable the 48 bstats() function which calculates 49 the total free space in the buffer 50 pool, the largest available 51 buffer, and the total space 52 currently allocated. */ 53 54 #define FreeWipe 1 /* Wipe free buffers to a guaranteed 55 pattern of garbage to trip up 56 miscreants who attempt to use 57 pointers into released buffers. */ 58 59 #define BestFit 1 /* Use a best fit algorithm when 60 searching for space for an 61 allocation request. This uses 62 memory more efficiently, but 63 allocation will be much slower. */ 64 65 #define BECtl 1 /* Define this symbol to enable the 66 bectl() function for automatic 67 pool space control. */ 68 #endif 69 70 #ifdef MEM_DEBUG 71 #undef NDEBUG 72 #define DumpData 1 73 #define BufValid 1 74 #define FreeWipe 1 75 #endif 76 77 #ifdef CFG_WITH_STATS 78 #define BufStats 1 79 #endif 80 81 #include <compiler.h> 82 #include <config.h> 83 #include <malloc.h> 84 #include <memtag.h> 85 #include <pta_stats.h> 86 #include <stdbool.h> 87 #include <stdint.h> 88 #include <stdlib_ext.h> 89 #include <stdlib.h> 90 #include <string.h> 91 #include <trace.h> 92 #include <util.h> 93 94 #if defined(__KERNEL__) 95 /* Compiling for TEE Core */ 96 #include <kernel/asan.h> 97 #include <kernel/spinlock.h> 98 #include <kernel/unwind.h> 99 100 static void *memset_unchecked(void *s, int c, size_t n) 101 { 102 return asan_memset_unchecked(s, c, n); 103 } 104 105 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src, 106 size_t n) 107 { 108 return asan_memcpy_unchecked(dst, src, n); 109 } 110 111 #else /*__KERNEL__*/ 112 /* Compiling for TA */ 113 114 static void *memset_unchecked(void *s, int c, size_t n) 115 { 116 return memset(s, c, n); 117 } 118 119 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src, 120 size_t n) 121 { 122 return memcpy(dst, src, n); 123 } 124 125 #endif /*__KERNEL__*/ 126 127 #include "bget.c" /* this is ugly, but this is bget */ 128 129 struct malloc_pool { 130 void *buf; 131 size_t len; 132 }; 133 134 struct malloc_ctx { 135 struct bpoolset poolset; 136 struct malloc_pool *pool; 137 size_t pool_len; 138 #ifdef BufStats 139 struct pta_stats_alloc mstats; 140 #endif 141 #ifdef __KERNEL__ 142 unsigned int spinlock; 143 #endif 144 }; 145 146 #ifdef __KERNEL__ 147 148 static uint32_t malloc_lock(struct malloc_ctx *ctx) 149 { 150 return cpu_spin_lock_xsave(&ctx->spinlock); 151 } 152 153 static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions) 154 { 155 cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions); 156 } 157 158 #else /* __KERNEL__ */ 159 160 static uint32_t malloc_lock(struct malloc_ctx *ctx __unused) 161 { 162 return 0; 163 } 164 165 static void malloc_unlock(struct malloc_ctx *ctx __unused, 166 uint32_t exceptions __unused) 167 { 168 } 169 170 #endif /* __KERNEL__ */ 171 172 #define DEFINE_CTX(name) struct malloc_ctx name = \ 173 { .poolset = { .freelist = { {0, 0}, \ 174 {&name.poolset.freelist, \ 175 &name.poolset.freelist}}}} 176 177 static DEFINE_CTX(malloc_ctx); 178 179 #ifdef CFG_NS_VIRTUALIZATION 180 static __nex_data DEFINE_CTX(nex_malloc_ctx); 181 #endif 182 183 static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused) 184 { 185 #if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM) 186 EMSG("Memory allocation failed: size %zu context %p", req_size, ctx); 187 print_kernel_stack(); 188 #endif 189 } 190 191 /* Most of the stuff in this function is copied from bgetr() in bget.c */ 192 static __maybe_unused bufsize bget_buf_size(void *buf) 193 { 194 bufsize osize; /* Old size of buffer */ 195 struct bhead *b; 196 197 b = BH(((char *)buf) - sizeof(struct bhead)); 198 osize = -b->bsize; 199 #ifdef BECtl 200 if (osize == 0) { 201 /* Buffer acquired directly through acqfcn. */ 202 struct bdhead *bd; 203 204 bd = BDH(((char *)buf) - sizeof(struct bdhead)); 205 osize = bd->tsize - sizeof(struct bdhead) - bd->offs; 206 } else 207 #endif 208 osize -= sizeof(struct bhead); 209 assert(osize > 0); 210 return osize; 211 } 212 213 static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size) 214 { 215 if (!buf) 216 return NULL; 217 218 COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant); 219 220 if (MEMTAG_IS_ENABLED) { 221 size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE); 222 223 /* 224 * Allocated buffer can be larger than requested when 225 * allocating with memalign(), but we should never tag more 226 * than allocated. 227 */ 228 assert(bget_buf_size(buf) >= sz + hdr_size); 229 return memtag_set_random_tags(buf, sz + hdr_size); 230 } 231 232 #if defined(__KERNEL__) 233 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) 234 asan_tag_access(buf, buf + hdr_size + requested_size); 235 #endif 236 return buf; 237 } 238 239 static void *maybe_untag_buf(void *buf) 240 { 241 if (!buf) 242 return NULL; 243 244 if (MEMTAG_IS_ENABLED) { 245 size_t sz = 0; 246 247 memtag_assert_tag(buf); /* Trying to catch double free early */ 248 sz = bget_buf_size(memtag_strip_tag(buf)); 249 return memtag_set_tags(buf, sz, 0); 250 } 251 252 #if defined(__KERNEL__) 253 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) 254 asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf)); 255 #endif 256 return buf; 257 } 258 259 static void *strip_tag(void *buf) 260 { 261 if (MEMTAG_IS_ENABLED) 262 return memtag_strip_tag(buf); 263 return buf; 264 } 265 266 static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused) 267 { 268 #if defined(__KERNEL__) 269 asan_tag_heap_free(buf, (uint8_t *)buf + len); 270 #endif 271 } 272 273 #ifdef BufStats 274 275 static void *raw_malloc_return_hook(void *p, size_t hdr_size, 276 size_t requested_size, 277 struct malloc_ctx *ctx) 278 { 279 if (ctx->poolset.totalloc > ctx->mstats.max_allocated) 280 ctx->mstats.max_allocated = ctx->poolset.totalloc; 281 282 if (!p) { 283 ctx->mstats.num_alloc_fail++; 284 print_oom(requested_size, ctx); 285 if (requested_size > ctx->mstats.biggest_alloc_fail) { 286 ctx->mstats.biggest_alloc_fail = requested_size; 287 ctx->mstats.biggest_alloc_fail_used = 288 ctx->poolset.totalloc; 289 } 290 } 291 292 return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size)); 293 } 294 295 static void gen_malloc_reset_stats(struct malloc_ctx *ctx) 296 { 297 uint32_t exceptions = malloc_lock(ctx); 298 299 ctx->mstats.max_allocated = 0; 300 ctx->mstats.num_alloc_fail = 0; 301 ctx->mstats.biggest_alloc_fail = 0; 302 ctx->mstats.biggest_alloc_fail_used = 0; 303 malloc_unlock(ctx, exceptions); 304 } 305 306 void malloc_reset_stats(void) 307 { 308 gen_malloc_reset_stats(&malloc_ctx); 309 } 310 311 static void gen_malloc_get_stats(struct malloc_ctx *ctx, 312 struct pta_stats_alloc *stats) 313 { 314 uint32_t exceptions = malloc_lock(ctx); 315 316 raw_malloc_get_stats(ctx, stats); 317 malloc_unlock(ctx, exceptions); 318 } 319 320 void malloc_get_stats(struct pta_stats_alloc *stats) 321 { 322 gen_malloc_get_stats(&malloc_ctx, stats); 323 } 324 325 #else /* BufStats */ 326 327 static void *raw_malloc_return_hook(void *p, size_t hdr_size, 328 size_t requested_size, 329 struct malloc_ctx *ctx ) 330 { 331 if (!p) 332 print_oom(requested_size, ctx); 333 334 return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size)); 335 } 336 337 #endif /* BufStats */ 338 339 #ifdef BufValid 340 static void raw_malloc_validate_pools(struct malloc_ctx *ctx) 341 { 342 size_t n; 343 344 for (n = 0; n < ctx->pool_len; n++) 345 bpoolv(ctx->pool[n].buf); 346 } 347 #else 348 static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused) 349 { 350 } 351 #endif 352 353 struct bpool_iterator { 354 struct bfhead *next_buf; 355 size_t pool_idx; 356 }; 357 358 static void bpool_foreach_iterator_init(struct malloc_ctx *ctx, 359 struct bpool_iterator *iterator) 360 { 361 iterator->pool_idx = 0; 362 iterator->next_buf = BFH(ctx->pool[0].buf); 363 } 364 365 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf, 366 size_t *len, bool *isfree) 367 { 368 struct bfhead *b = iterator->next_buf; 369 bufsize bs = b->bh.bsize; 370 371 if (bs == ESent) 372 return false; 373 374 if (bs < 0) { 375 /* Allocated buffer */ 376 bs = -bs; 377 378 *isfree = false; 379 } else { 380 /* Free Buffer */ 381 *isfree = true; 382 383 /* Assert that the free list links are intact */ 384 assert(b->ql.blink->ql.flink == b); 385 assert(b->ql.flink->ql.blink == b); 386 } 387 388 *buf = (uint8_t *)b + sizeof(struct bhead); 389 *len = bs - sizeof(struct bhead); 390 391 iterator->next_buf = BFH((uint8_t *)b + bs); 392 return true; 393 } 394 395 static bool bpool_foreach(struct malloc_ctx *ctx, 396 struct bpool_iterator *iterator, void **buf) 397 { 398 while (true) { 399 size_t len; 400 bool isfree; 401 402 if (bpool_foreach_pool(iterator, buf, &len, &isfree)) { 403 if (isfree) 404 continue; 405 return true; 406 } 407 408 if ((iterator->pool_idx + 1) >= ctx->pool_len) 409 return false; 410 411 iterator->pool_idx++; 412 iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf); 413 } 414 } 415 416 /* Convenience macro for looping over all allocated buffers */ 417 #define BPOOL_FOREACH(ctx, iterator, bp) \ 418 for (bpool_foreach_iterator_init((ctx),(iterator)); \ 419 bpool_foreach((ctx),(iterator), (bp));) 420 421 static void *raw_mem_alloc(uint32_t flags, void *ptr, size_t hdr_size, 422 size_t ftr_size, size_t alignment, size_t pl_nmemb, 423 size_t pl_size, struct malloc_ctx *ctx) 424 { 425 void *p = NULL; 426 bufsize s = 0; 427 428 raw_malloc_validate_pools(ctx); 429 430 if (!alignment || !IS_POWER_OF_TWO(alignment)) 431 return NULL; 432 433 /* Compute total size, excluding hdr_size */ 434 if (MUL_OVERFLOW(pl_nmemb, pl_size, &s)) 435 goto out; 436 if (ADD_OVERFLOW(s, ftr_size, &s)) 437 goto out; 438 439 /* BGET doesn't like 0 sized allocations */ 440 if (!s) 441 s++; 442 443 if ((flags & MAF_ZERO_INIT) && !ptr) 444 p = bgetz(alignment, hdr_size, s, &ctx->poolset); 445 else 446 p = bget(alignment, hdr_size, s, &ctx->poolset); 447 448 if (p && ptr) { 449 void *old_ptr = maybe_untag_buf(ptr); 450 bufsize old_sz = bget_buf_size(old_ptr); 451 bufsize new_sz = s + hdr_size; 452 453 if (old_sz < new_sz) { 454 memcpy_unchecked(p, old_ptr, old_sz); 455 /* User space reallocations are always zeroed */ 456 if (!IS_ENABLED2(__KERNEL__) || (flags & MAF_ZERO_INIT)) 457 memset_unchecked((uint8_t *)p + old_sz, 0, 458 new_sz - old_sz); 459 } else { 460 memcpy_unchecked(p, old_ptr, new_sz); 461 } 462 463 brel(old_ptr, &ctx->poolset, false /*!wipe*/); 464 } 465 out: 466 return raw_malloc_return_hook(p, hdr_size, pl_nmemb * pl_size, ctx); 467 } 468 469 void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment, 470 size_t pl_size, struct malloc_ctx *ctx) 471 { 472 return raw_mem_alloc(MAF_NULL, NULL, hdr_size, ftr_size, alignment, 1, 473 pl_size, ctx); 474 } 475 476 void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size, 477 struct malloc_ctx *ctx) 478 { 479 return raw_mem_alloc(MAF_NULL, NULL, hdr_size, ftr_size, 1, 1, 480 pl_size, ctx); 481 } 482 483 void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe) 484 { 485 raw_malloc_validate_pools(ctx); 486 487 if (ptr) 488 brel(maybe_untag_buf(ptr), &ctx->poolset, wipe); 489 } 490 491 void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb, 492 size_t pl_size, struct malloc_ctx *ctx) 493 { 494 return raw_mem_alloc(MAF_ZERO_INIT, NULL, hdr_size, ftr_size, 1, 495 pl_nmemb, pl_size, ctx); 496 } 497 498 void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size, 499 size_t pl_size, struct malloc_ctx *ctx) 500 { 501 return raw_mem_alloc(MAF_NULL, ptr, hdr_size, ftr_size, 1, 1, 502 pl_size, ctx); 503 } 504 505 #ifdef ENABLE_MDBG 506 507 struct mdbg_hdr { 508 const char *fname; 509 uint16_t line; 510 uint32_t pl_size; 511 uint32_t magic; 512 #if defined(ARM64) 513 uint64_t pad; 514 #endif 515 }; 516 517 #define MDBG_HEADER_MAGIC 0xadadadad 518 #define MDBG_FOOTER_MAGIC 0xecececec 519 520 static size_t mdbg_get_ftr_size(size_t pl_size) 521 { 522 size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size; 523 524 return ftr_pad + sizeof(uint32_t); 525 } 526 527 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr) 528 { 529 uint32_t *footer; 530 531 footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size + 532 mdbg_get_ftr_size(hdr->pl_size)); 533 footer--; 534 return strip_tag(footer); 535 } 536 537 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname, 538 int lineno, size_t pl_size) 539 { 540 uint32_t *footer; 541 542 hdr->fname = fname; 543 hdr->line = lineno; 544 hdr->pl_size = pl_size; 545 hdr->magic = MDBG_HEADER_MAGIC; 546 547 footer = mdbg_get_footer(hdr); 548 *footer = MDBG_FOOTER_MAGIC; 549 } 550 551 static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname, 552 int lineno, size_t size) 553 { 554 struct mdbg_hdr *hdr; 555 uint32_t exceptions = malloc_lock(ctx); 556 557 /* 558 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM. 559 */ 560 COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0); 561 562 hdr = raw_malloc(sizeof(struct mdbg_hdr), 563 mdbg_get_ftr_size(size), size, ctx); 564 if (hdr) { 565 mdbg_update_hdr(hdr, fname, lineno, size); 566 hdr++; 567 } 568 569 malloc_unlock(ctx, exceptions); 570 return hdr; 571 } 572 573 static void assert_header(struct mdbg_hdr *hdr __maybe_unused) 574 { 575 assert(hdr->magic == MDBG_HEADER_MAGIC); 576 assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC); 577 } 578 579 static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe) 580 { 581 struct mdbg_hdr *hdr = ptr; 582 583 if (hdr) { 584 hdr--; 585 assert_header(hdr); 586 hdr->magic = 0; 587 *mdbg_get_footer(hdr) = 0; 588 raw_free(hdr, ctx, wipe); 589 } 590 } 591 592 static void free_helper(void *ptr, bool wipe) 593 { 594 uint32_t exceptions = malloc_lock(&malloc_ctx); 595 596 gen_mdbg_free(&malloc_ctx, ptr, wipe); 597 malloc_unlock(&malloc_ctx, exceptions); 598 } 599 600 static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname, int lineno, 601 size_t nmemb, size_t size) 602 { 603 struct mdbg_hdr *hdr; 604 uint32_t exceptions = malloc_lock(ctx); 605 606 hdr = raw_calloc(sizeof(struct mdbg_hdr), 607 mdbg_get_ftr_size(nmemb * size), nmemb, size, 608 ctx); 609 if (hdr) { 610 mdbg_update_hdr(hdr, fname, lineno, nmemb * size); 611 hdr++; 612 } 613 malloc_unlock(ctx, exceptions); 614 return hdr; 615 } 616 617 static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx, const char *fname, 618 int lineno, void *ptr, size_t size) 619 { 620 struct mdbg_hdr *hdr = ptr; 621 622 if (hdr) { 623 hdr--; 624 assert_header(hdr); 625 } 626 hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr), 627 mdbg_get_ftr_size(size), size, ctx); 628 if (hdr) { 629 mdbg_update_hdr(hdr, fname, lineno, size); 630 hdr++; 631 } 632 return hdr; 633 } 634 635 static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname, 636 int lineno, void *ptr, size_t size) 637 { 638 void *p; 639 uint32_t exceptions = malloc_lock(ctx); 640 641 p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size); 642 malloc_unlock(ctx, exceptions); 643 return p; 644 } 645 646 #define realloc_unlocked(ctx, ptr, size) \ 647 gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size)) 648 649 static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname, 650 int lineno, size_t alignment, size_t size) 651 { 652 struct mdbg_hdr *hdr; 653 uint32_t exceptions = malloc_lock(ctx); 654 655 hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size), 656 alignment, size, ctx); 657 if (hdr) { 658 mdbg_update_hdr(hdr, fname, lineno, size); 659 hdr++; 660 } 661 malloc_unlock(ctx, exceptions); 662 return hdr; 663 } 664 665 666 static void *get_payload_start_size(void *raw_buf, size_t *size) 667 { 668 struct mdbg_hdr *hdr = raw_buf; 669 670 assert(bget_buf_size(hdr) >= hdr->pl_size); 671 *size = hdr->pl_size; 672 return hdr + 1; 673 } 674 675 static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump) 676 { 677 struct bpool_iterator itr; 678 void *b; 679 uint32_t exceptions = malloc_lock(ctx); 680 681 raw_malloc_validate_pools(ctx); 682 683 BPOOL_FOREACH(ctx, &itr, &b) { 684 struct mdbg_hdr *hdr = (struct mdbg_hdr *)b; 685 686 assert_header(hdr); 687 688 if (bufdump > 0) { 689 const char *fname = hdr->fname; 690 691 if (!fname) 692 fname = "unknown"; 693 694 IMSG("buffer: %d bytes %s:%d", 695 hdr->pl_size, fname, hdr->line); 696 } 697 } 698 699 malloc_unlock(ctx, exceptions); 700 } 701 702 void *mdbg_malloc(const char *fname, int lineno, size_t size) 703 { 704 return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size); 705 } 706 707 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 708 { 709 return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size); 710 } 711 712 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 713 { 714 return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size); 715 } 716 717 void *mdbg_memalign(const char *fname, int lineno, size_t alignment, 718 size_t size) 719 { 720 return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size); 721 } 722 723 #if __STDC_VERSION__ >= 201112L 724 void *mdbg_aligned_alloc(const char *fname, int lineno, size_t alignment, 725 size_t size) 726 { 727 if (size % alignment) 728 return NULL; 729 730 return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size); 731 } 732 #endif /* __STDC_VERSION__ */ 733 734 void mdbg_check(int bufdump) 735 { 736 gen_mdbg_check(&malloc_ctx, bufdump); 737 } 738 739 /* 740 * Since malloc debug is enabled, malloc() and friends are redirected by macros 741 * to mdbg_malloc() etc. 742 * We still want to export the standard entry points in case they are referenced 743 * by the application, either directly or via external libraries. 744 */ 745 #undef malloc 746 void *malloc(size_t size) 747 { 748 return mdbg_malloc(__FILE__, __LINE__, size); 749 } 750 751 #undef calloc 752 void *calloc(size_t nmemb, size_t size) 753 { 754 return mdbg_calloc(__FILE__, __LINE__, nmemb, size); 755 } 756 757 #undef realloc 758 void *realloc(void *ptr, size_t size) 759 { 760 return mdbg_realloc(__FILE__, __LINE__, ptr, size); 761 } 762 763 #else /* ENABLE_MDBG */ 764 765 void *malloc(size_t size) 766 { 767 void *p; 768 uint32_t exceptions = malloc_lock(&malloc_ctx); 769 770 p = raw_malloc(0, 0, size, &malloc_ctx); 771 malloc_unlock(&malloc_ctx, exceptions); 772 return p; 773 } 774 775 static void free_helper(void *ptr, bool wipe) 776 { 777 uint32_t exceptions = malloc_lock(&malloc_ctx); 778 779 raw_free(ptr, &malloc_ctx, wipe); 780 malloc_unlock(&malloc_ctx, exceptions); 781 } 782 783 void *calloc(size_t nmemb, size_t size) 784 { 785 void *p; 786 uint32_t exceptions = malloc_lock(&malloc_ctx); 787 788 p = raw_calloc(0, 0, nmemb, size, &malloc_ctx); 789 malloc_unlock(&malloc_ctx, exceptions); 790 return p; 791 } 792 793 static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr, 794 size_t size) 795 { 796 return raw_realloc(ptr, 0, 0, size, ctx); 797 } 798 799 void *realloc(void *ptr, size_t size) 800 { 801 void *p; 802 uint32_t exceptions = malloc_lock(&malloc_ctx); 803 804 p = realloc_unlocked(&malloc_ctx, ptr, size); 805 malloc_unlock(&malloc_ctx, exceptions); 806 return p; 807 } 808 809 void *memalign(size_t alignment, size_t size) 810 { 811 void *p; 812 uint32_t exceptions = malloc_lock(&malloc_ctx); 813 814 p = raw_memalign(0, 0, alignment, size, &malloc_ctx); 815 malloc_unlock(&malloc_ctx, exceptions); 816 return p; 817 } 818 819 #if __STDC_VERSION__ >= 201112L 820 void *aligned_alloc(size_t alignment, size_t size) 821 { 822 if (size % alignment) 823 return NULL; 824 825 return memalign(alignment, size); 826 } 827 #endif /* __STDC_VERSION__ */ 828 829 static void *get_payload_start_size(void *ptr, size_t *size) 830 { 831 *size = bget_buf_size(ptr); 832 return ptr; 833 } 834 835 #endif 836 837 void free(void *ptr) 838 { 839 free_helper(ptr, false); 840 } 841 842 void free_wipe(void *ptr) 843 { 844 free_helper(ptr, true); 845 } 846 847 static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 848 { 849 uint32_t exceptions = malloc_lock(ctx); 850 851 raw_malloc_add_pool(ctx, buf, len); 852 malloc_unlock(ctx, exceptions); 853 } 854 855 static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx, 856 void *buf, size_t len) 857 { 858 uint32_t exceptions = malloc_lock(ctx); 859 bool ret = false; 860 861 ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len); 862 malloc_unlock(ctx, exceptions); 863 864 return ret; 865 } 866 867 static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx, 868 void *buf, size_t len) 869 { 870 bool ret = false; 871 uint32_t exceptions = malloc_lock(ctx); 872 873 ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len); 874 malloc_unlock(ctx, exceptions); 875 return ret; 876 } 877 878 size_t raw_malloc_get_ctx_size(void) 879 { 880 return sizeof(struct malloc_ctx); 881 } 882 883 void raw_malloc_init_ctx(struct malloc_ctx *ctx) 884 { 885 memset(ctx, 0, sizeof(*ctx)); 886 ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist; 887 ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist; 888 } 889 890 void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 891 { 892 const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead); 893 uintptr_t start = (uintptr_t)buf; 894 uintptr_t end = start + len; 895 void *p = NULL; 896 size_t l = 0; 897 898 start = ROUNDUP(start, SizeQuant); 899 end = ROUNDDOWN(end, SizeQuant); 900 901 if (start > end || (end - start) < min_len) { 902 DMSG("Skipping too small pool"); 903 return; 904 } 905 906 /* First pool requires a bigger size */ 907 if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) { 908 DMSG("Skipping too small initial pool"); 909 return; 910 } 911 912 tag_asan_free((void *)start, end - start); 913 bpool((void *)start, end - start, &ctx->poolset); 914 l = ctx->pool_len + 1; 915 p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l); 916 assert(p); 917 ctx->pool = p; 918 ctx->pool[ctx->pool_len].buf = (void *)start; 919 ctx->pool[ctx->pool_len].len = end - start; 920 #ifdef BufStats 921 ctx->mstats.size += ctx->pool[ctx->pool_len].len; 922 #endif 923 ctx->pool_len = l; 924 } 925 926 bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx, 927 void *buf, size_t len) 928 { 929 uintptr_t buf_start = (uintptr_t)strip_tag(buf); 930 uintptr_t buf_end = buf_start + len; 931 size_t n = 0; 932 933 raw_malloc_validate_pools(ctx); 934 935 for (n = 0; n < ctx->pool_len; n++) { 936 uintptr_t pool_start = (uintptr_t)strip_tag(ctx->pool[n].buf); 937 uintptr_t pool_end = pool_start + ctx->pool[n].len; 938 939 if (buf_start > buf_end || pool_start > pool_end) 940 return true; /* Wrapping buffers, shouldn't happen */ 941 942 if ((buf_start >= pool_start && buf_start < pool_end) || 943 (buf_end > pool_start && buf_end < pool_end)) 944 return true; 945 } 946 947 return false; 948 } 949 950 bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx, 951 void *buf, size_t len) 952 { 953 struct bpool_iterator itr = { }; 954 void *b = NULL; 955 uint8_t *start_buf = strip_tag(buf); 956 uint8_t *end_buf = start_buf + len; 957 958 raw_malloc_validate_pools(ctx); 959 960 /* Check for wrapping */ 961 if (start_buf > end_buf) 962 return false; 963 964 BPOOL_FOREACH(ctx, &itr, &b) { 965 uint8_t *start_b = NULL; 966 uint8_t *end_b = NULL; 967 size_t s = 0; 968 969 start_b = strip_tag(get_payload_start_size(b, &s)); 970 end_b = start_b + s; 971 if (start_buf >= start_b && end_buf <= end_b) 972 return true; 973 } 974 975 return false; 976 } 977 978 #ifdef CFG_WITH_STATS 979 void raw_malloc_get_stats(struct malloc_ctx *ctx, struct pta_stats_alloc *stats) 980 { 981 memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats)); 982 stats->allocated = ctx->poolset.totalloc; 983 } 984 #endif 985 986 void malloc_add_pool(void *buf, size_t len) 987 { 988 gen_malloc_add_pool(&malloc_ctx, buf, len); 989 } 990 991 bool malloc_buffer_is_within_alloced(void *buf, size_t len) 992 { 993 return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len); 994 } 995 996 bool malloc_buffer_overlaps_heap(void *buf, size_t len) 997 { 998 return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len); 999 } 1000 1001 #ifdef CFG_NS_VIRTUALIZATION 1002 1003 #ifndef ENABLE_MDBG 1004 1005 void *nex_malloc(size_t size) 1006 { 1007 void *p; 1008 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1009 1010 p = raw_malloc(0, 0, size, &nex_malloc_ctx); 1011 malloc_unlock(&nex_malloc_ctx, exceptions); 1012 return p; 1013 } 1014 1015 void *nex_calloc(size_t nmemb, size_t size) 1016 { 1017 void *p; 1018 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1019 1020 p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx); 1021 malloc_unlock(&nex_malloc_ctx, exceptions); 1022 return p; 1023 } 1024 1025 void *nex_realloc(void *ptr, size_t size) 1026 { 1027 void *p; 1028 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1029 1030 p = realloc_unlocked(&nex_malloc_ctx, ptr, size); 1031 malloc_unlock(&nex_malloc_ctx, exceptions); 1032 return p; 1033 } 1034 1035 void *nex_memalign(size_t alignment, size_t size) 1036 { 1037 void *p; 1038 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1039 1040 p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx); 1041 malloc_unlock(&nex_malloc_ctx, exceptions); 1042 return p; 1043 } 1044 1045 void nex_free(void *ptr) 1046 { 1047 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1048 1049 raw_free(ptr, &nex_malloc_ctx, false /* !wipe */); 1050 malloc_unlock(&nex_malloc_ctx, exceptions); 1051 } 1052 1053 #else /* ENABLE_MDBG */ 1054 1055 void *nex_mdbg_malloc(const char *fname, int lineno, size_t size) 1056 { 1057 return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size); 1058 } 1059 1060 void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 1061 { 1062 return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size); 1063 } 1064 1065 void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 1066 { 1067 return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size); 1068 } 1069 1070 void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment, 1071 size_t size) 1072 { 1073 return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment, size); 1074 } 1075 1076 void nex_mdbg_check(int bufdump) 1077 { 1078 gen_mdbg_check(&nex_malloc_ctx, bufdump); 1079 } 1080 1081 void nex_free(void *ptr) 1082 { 1083 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1084 1085 gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */); 1086 malloc_unlock(&nex_malloc_ctx, exceptions); 1087 } 1088 1089 #endif /* ENABLE_MDBG */ 1090 1091 void nex_malloc_add_pool(void *buf, size_t len) 1092 { 1093 gen_malloc_add_pool(&nex_malloc_ctx, buf, len); 1094 } 1095 1096 bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len) 1097 { 1098 return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len); 1099 } 1100 1101 bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len) 1102 { 1103 return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len); 1104 } 1105 1106 #ifdef BufStats 1107 1108 void nex_malloc_reset_stats(void) 1109 { 1110 gen_malloc_reset_stats(&nex_malloc_ctx); 1111 } 1112 1113 void nex_malloc_get_stats(struct pta_stats_alloc *stats) 1114 { 1115 gen_malloc_get_stats(&nex_malloc_ctx, stats); 1116 } 1117 1118 #endif 1119 1120 #endif 1121