// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2022, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_ALL_OPTIONS
#define TestProg 20000          /* Generate built-in test program
                                   if defined. The value specifies
                                   how many buffer allocation attempts
                                   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
                                /* Buffer allocation size quantum:
                                   all buffers allocated are a
                                   multiple of this size. This
                                   MUST be a power of two. */
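/*
 * Example: with SizeQuant 16 (__LP64__), bget rounds every request up
 * to the next 16-byte multiple before adding its per-buffer header, so
 * malloc(1) and malloc(16) consume the same amount of pool space, and
 * malloc(17) consumes two quanta.
 */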
#ifdef BGET_ENABLE_ALL_OPTIONS
#define BufDump 1               /* Define this symbol to enable the
                                   bpoold() function which dumps the
                                   buffers in a buffer pool. */

#define BufValid 1              /* Define this symbol to enable the
                                   bpoolv() function for validating
                                   a buffer pool. */

#define DumpData 1              /* Define this symbol to enable the
                                   bufdump() function which allows
                                   dumping the contents of an allocated
                                   or free buffer. */

#define BufStats 1              /* Define this symbol to enable the
                                   bstats() function which calculates
                                   the total free space in the buffer
                                   pool, the largest available
                                   buffer, and the total space
                                   currently allocated. */

#define FreeWipe 1              /* Wipe free buffers to a guaranteed
                                   pattern of garbage to trip up
                                   miscreants who attempt to use
                                   pointers into released buffers. */

#define BestFit 1               /* Use a best fit algorithm when
                                   searching for space for an
                                   allocation request. This uses
                                   memory more efficiently, but
                                   allocation will be much slower. */

#define BECtl 1                 /* Define this symbol to enable the
                                   bectl() function for automatic
                                   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
        return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
                                             size_t n)
{
        return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
        return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
                                             size_t n)
{
        return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"               /* this is ugly, but this is bget */

struct malloc_pool {
        void *buf;
        size_t len;
};

struct malloc_ctx {
        struct bpoolset poolset;
        struct malloc_pool *pool;
        size_t pool_len;
#ifdef BufStats
        struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
        unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
        return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
        cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
        return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
                          uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =               \
        { .poolset = { .freelist = { {0, 0},                    \
                        {&name.poolset.freelist,                \
                         &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif
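/*
 * The initializer above creates an empty circular free list: the
 * sentinel bfhead carries {prevfree = 0, bsize = 0} and both queue
 * links point back at the poolset's own freelist, matching what
 * raw_malloc_init_ctx() establishes at run time:
 *
 *      ctx->poolset.freelist.ql.flink == &ctx->poolset.freelist;
 *      ctx->poolset.freelist.ql.blink == &ctx->poolset.freelist;
 */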
static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
        EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
        print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
        bufsize osize;          /* Old size of buffer */
        struct bhead *b;

        b = BH(((char *)buf) - sizeof(struct bhead));
        osize = -b->bsize;
#ifdef BECtl
        if (osize == 0) {
                /* Buffer acquired directly through acqfcn. */
                struct bdhead *bd;

                bd = BDH(((char *)buf) - sizeof(struct bdhead));
                osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
        } else
#endif
                osize -= sizeof(struct bhead);
        assert(osize > 0);
        return osize;
}

static void *maybe_tag_buf(void *buf, size_t __maybe_unused requested_size)
{
        if (!buf)
                return NULL;

        COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

        if (MEMTAG_IS_ENABLED) {
                size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

                /*
                 * Allocated buffer can be larger than requested when
                 * allocating with memalign(), but we should never tag more
                 * than allocated.
                 */
                assert(bget_buf_size(buf) >= sz);
                return memtag_set_random_tags(buf, sz);
        }

#if defined(__KERNEL__)
        if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
                asan_tag_access(buf, (uint8_t *)buf + requested_size);
#endif
        return buf;
}

static void *maybe_untag_buf(void *buf)
{
        if (!buf)
                return NULL;

        if (MEMTAG_IS_ENABLED) {
                size_t sz = 0;

                memtag_assert_tag(buf); /* Trying to catch double free early */
                sz = bget_buf_size(memtag_strip_tag(buf));
                return memtag_set_tags(buf, sz, 0);
        }

#if defined(__KERNEL__)
        if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
                asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
        return buf;
}

static void *strip_tag(void *buf)
{
        if (MEMTAG_IS_ENABLED)
                return memtag_strip_tag(buf);
        return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
        asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t requested_size,
                                    struct malloc_ctx *ctx)
{
        if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
                ctx->mstats.max_allocated = ctx->poolset.totalloc;

        if (!p) {
                ctx->mstats.num_alloc_fail++;
                print_oom(requested_size, ctx);
                if (requested_size > ctx->mstats.biggest_alloc_fail) {
                        ctx->mstats.biggest_alloc_fail = requested_size;
                        ctx->mstats.biggest_alloc_fail_used =
                                ctx->poolset.totalloc;
                }
        }

        return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
        uint32_t exceptions = malloc_lock(ctx);

        ctx->mstats.max_allocated = 0;
        ctx->mstats.num_alloc_fail = 0;
        ctx->mstats.biggest_alloc_fail = 0;
        ctx->mstats.biggest_alloc_fail_used = 0;
        malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
        gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
                                 struct malloc_stats *stats)
{
        uint32_t exceptions = malloc_lock(ctx);

        memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
        stats->allocated = ctx->poolset.totalloc;
        malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
        gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t requested_size,
                                    struct malloc_ctx *ctx)
{
        if (!p)
                print_oom(requested_size, ctx);

        return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

#endif /* BufStats */
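/*
 * Sketch of a stats consumer (report() is a hypothetical sink; the
 * field types are declared in malloc.h):
 *
 *      struct malloc_stats st = { };
 *
 *      malloc_get_stats(&st);
 *      report(st.allocated, st.size, st.max_allocated);
 *
 * st.allocated is the live byte count, st.size the total pool size and
 * st.max_allocated the high-water mark since the last reset.
 */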
#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
        size_t n;

        for (n = 0; n < ctx->pool_len; n++)
                bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
        struct bfhead *next_buf;
        size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
                                        struct bpool_iterator *iterator)
{
        iterator->pool_idx = 0;
        iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
                               size_t *len, bool *isfree)
{
        struct bfhead *b = iterator->next_buf;
        bufsize bs = b->bh.bsize;

        if (bs == ESent)
                return false;

        if (bs < 0) {
                /* Allocated buffer */
                bs = -bs;

                *isfree = false;
        } else {
                /* Free buffer */
                *isfree = true;

                /* Assert that the free list links are intact */
                assert(b->ql.blink->ql.flink == b);
                assert(b->ql.flink->ql.blink == b);
        }

        *buf = (uint8_t *)b + sizeof(struct bhead);
        *len = bs - sizeof(struct bhead);

        iterator->next_buf = BFH((uint8_t *)b + bs);
        return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
                          struct bpool_iterator *iterator, void **buf)
{
        while (true) {
                size_t len;
                bool isfree;

                if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
                        if (isfree)
                                continue;
                        return true;
                }

                if ((iterator->pool_idx + 1) >= ctx->pool_len)
                        return false;

                iterator->pool_idx++;
                iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
        }
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)                        \
        for (bpool_foreach_iterator_init((ctx), (iterator));    \
             bpool_foreach((ctx), (iterator), (bp));)
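/*
 * Typical use (cf. gen_mdbg_check() further down): visit every
 * currently allocated buffer in all pools of a context.
 *
 *      struct bpool_iterator itr;
 *      void *b;
 *
 *      BPOOL_FOREACH(ctx, &itr, &b)
 *              inspect(b);     // inspect() is a hypothetical visitor
 *
 * Free buffers are skipped by bpool_foreach(), so the loop body only
 * ever sees live allocations.
 */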
void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
                   size_t pl_size, struct malloc_ctx *ctx)
{
        void *ptr = NULL;
        bufsize s;

        if (!alignment || !IS_POWER_OF_TWO(alignment))
                return NULL;

        raw_malloc_validate_pools(ctx);

        /* Compute total size, excluding the header */
        if (ADD_OVERFLOW(pl_size, ftr_size, &s))
                goto out;

        /* BGET doesn't like 0 sized allocations */
        if (!s)
                s++;

        ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
        return raw_malloc_return_hook(ptr, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
                 struct malloc_ctx *ctx)
{
        /*
         * Note that we're feeding SizeQ as alignment; this is the
         * smallest alignment that bget() can use.
         */
        return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
        raw_malloc_validate_pools(ctx);

        if (ptr)
                brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
                 size_t pl_size, struct malloc_ctx *ctx)
{
        void *ptr = NULL;
        bufsize s;

        raw_malloc_validate_pools(ctx);

        /* Compute total size, excluding hdr_size */
        if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
                goto out;
        if (ADD_OVERFLOW(s, ftr_size, &s))
                goto out;

        /* BGET doesn't like 0 sized allocations */
        if (!s)
                s++;

        ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
        return raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
                  size_t pl_size, struct malloc_ctx *ctx)
{
        void *p = NULL;
        bufsize s;

        /* Compute total size */
        if (ADD_OVERFLOW(pl_size, hdr_size, &s))
                goto out;
        if (ADD_OVERFLOW(s, ftr_size, &s))
                goto out;

        raw_malloc_validate_pools(ctx);

        /* BGET doesn't like 0 sized allocations */
        if (!s)
                s++;

        p = bgetr(maybe_untag_buf(ptr), 0, 0, s, &ctx->poolset);
out:
        return raw_malloc_return_hook(p, pl_size, ctx);
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
        const char *fname;
        uint16_t line;
        uint32_t pl_size;
        uint32_t magic;
#if defined(ARM64)
        uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC       0xadadadad
#define MDBG_FOOTER_MAGIC       0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
        size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

        return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
        uint32_t *footer;

        footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
                              mdbg_get_ftr_size(hdr->pl_size));
        footer--;
        return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
                            int lineno, size_t pl_size)
{
        uint32_t *footer;

        hdr->fname = fname;
        hdr->line = lineno;
        hdr->pl_size = pl_size;
        hdr->magic = MDBG_HEADER_MAGIC;

        footer = mdbg_get_footer(hdr);
        *footer = MDBG_FOOTER_MAGIC;
}
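/*
 * Layout of a debug allocation, with pl_size = 10 as a worked example:
 *
 *      [ struct mdbg_hdr ][ payload, 10 bytes ][ pad, 2 ][ footer, 4 ]
 *
 * mdbg_get_ftr_size(10) = ROUNDUP(10, 4) - 10 + 4 = 6, so the 32-bit
 * footer magic sits 4-byte aligned right behind the padded payload, at
 * payload + 12. assert_header() below verifies both magic words, which
 * catches buffer overruns (footer) as well as double frees and writes
 * through stale pointers (header, zeroed by gen_mdbg_free()).
 */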
static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
                             int lineno, size_t size)
{
        struct mdbg_hdr *hdr;
        uint32_t exceptions = malloc_lock(ctx);

        /* Check that struct mdbg_hdr works with BGET_HDR_QUANTUM */
        COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

        hdr = raw_malloc(sizeof(struct mdbg_hdr),
                         mdbg_get_ftr_size(size), size, ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, size);
                hdr++;
        }

        malloc_unlock(ctx, exceptions);
        return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
        assert(hdr->magic == MDBG_HEADER_MAGIC);
        assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
        struct mdbg_hdr *hdr = ptr;

        if (hdr) {
                hdr--;
                assert_header(hdr);
                hdr->magic = 0;
                *mdbg_get_footer(hdr) = 0;
                raw_free(hdr, ctx, wipe);
        }
}

static void free_helper(void *ptr, bool wipe)
{
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        gen_mdbg_free(&malloc_ctx, ptr, wipe);
        malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
                             int lineno, size_t nmemb, size_t size)
{
        struct mdbg_hdr *hdr;
        uint32_t exceptions = malloc_lock(ctx);

        hdr = raw_calloc(sizeof(struct mdbg_hdr),
                         mdbg_get_ftr_size(nmemb * size), nmemb, size,
                         ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
                hdr++;
        }
        malloc_unlock(ctx, exceptions);
        return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
                                       const char *fname, int lineno,
                                       void *ptr, size_t size)
{
        struct mdbg_hdr *hdr = ptr;

        if (hdr) {
                hdr--;
                assert_header(hdr);
        }
        hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
                          mdbg_get_ftr_size(size), size, ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, size);
                hdr++;
        }
        return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
                              int lineno, void *ptr, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(ctx);

        p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
        malloc_unlock(ctx, exceptions);
        return p;
}

#define realloc_unlocked(ctx, ptr, size) \
        gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
                               int lineno, size_t alignment, size_t size)
{
        struct mdbg_hdr *hdr;
        uint32_t exceptions = malloc_lock(ctx);

        hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
                           alignment, size, ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, size);
                hdr++;
        }
        malloc_unlock(ctx, exceptions);
        return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
        struct mdbg_hdr *hdr = raw_buf;

        assert(bget_buf_size(hdr) >= hdr->pl_size);
        *size = hdr->pl_size;
        return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
        struct bpool_iterator itr;
        void *b;
        uint32_t exceptions = malloc_lock(ctx);

        raw_malloc_validate_pools(ctx);

        BPOOL_FOREACH(ctx, &itr, &b) {
                struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

                assert_header(hdr);

                if (bufdump > 0) {
                        const char *fname = hdr->fname;

                        if (!fname)
                                fname = "unknown";

                        IMSG("buffer: %d bytes %s:%d\n",
                             hdr->pl_size, fname, hdr->line);
                }
        }

        malloc_unlock(ctx, exceptions);
}
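/*
 * With bufdump > 0, gen_mdbg_check() logs one line per live allocation,
 * along the lines of (illustrative output):
 *
 *      buffer: 64 bytes core/kernel/example.c:123
 *
 * and asserts on any corrupted header or footer it encounters.
 */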
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
        return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
        return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
        return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
                    size_t size)
{
        return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
        gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
        return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
        return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
        return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = raw_malloc(0, 0, size, &malloc_ctx);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

static void free_helper(void *ptr, bool wipe)
{
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        raw_free(ptr, &malloc_ctx, wipe);
        malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
                              size_t size)
{
        return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = realloc_unlocked(&malloc_ctx, ptr, size);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

void *memalign(size_t alignment, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
        *size = bget_buf_size(ptr);
        return ptr;
}

#endif /* ENABLE_MDBG */

void free(void *ptr)
{
        free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
        free_helper(ptr, true);
}
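/*
 * Pool registration below trims the supplied region to SizeQuant
 * alignment before handing it to bpool(). E.g. with SizeQuant 16,
 * buf = 0x1003 and len = 0x1000 yield start = 0x1010 and end = 0x2000,
 * so up to SizeQuant - 1 bytes may be shaved off either end.
 */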
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
        void *p;
        size_t l;
        uint32_t exceptions;
        uintptr_t start = (uintptr_t)buf;
        uintptr_t end = start + len;
        const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);

        start = ROUNDUP(start, SizeQuant);
        end = ROUNDDOWN(end, SizeQuant);

        if (start > end || (end - start) < min_len) {
                DMSG("Skipping too small pool");
                return;
        }

        /* First pool requires a bigger size */
        if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
                DMSG("Skipping too small initial pool");
                return;
        }

        exceptions = malloc_lock(ctx);

        tag_asan_free((void *)start, end - start);
        bpool((void *)start, end - start, &ctx->poolset);
        l = ctx->pool_len + 1;
        p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
        assert(p);
        ctx->pool = p;
        ctx->pool[ctx->pool_len].buf = (void *)start;
        ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
        ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
        ctx->pool_len = l;
        malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
                                                void *buf, size_t len)
{
        struct bpool_iterator itr;
        void *b;
        uint8_t *start_buf = strip_tag(buf);
        uint8_t *end_buf = start_buf + len;
        bool ret = false;
        uint32_t exceptions = malloc_lock(ctx);

        raw_malloc_validate_pools(ctx);

        /* Check for wrapping */
        if (start_buf > end_buf)
                goto out;

        BPOOL_FOREACH(ctx, &itr, &b) {
                uint8_t *start_b;
                uint8_t *end_b;
                size_t s;

                start_b = get_payload_start_size(b, &s);
                end_b = start_b + s;

                if (start_buf >= start_b && end_buf <= end_b) {
                        ret = true;
                        goto out;
                }
        }

out:
        malloc_unlock(ctx, exceptions);

        return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
                                            void *buf, size_t len)
{
        uintptr_t buf_start = (uintptr_t)buf;
        uintptr_t buf_end = buf_start + len;
        size_t n;
        bool ret = false;
        uint32_t exceptions = malloc_lock(ctx);

        raw_malloc_validate_pools(ctx);

        for (n = 0; n < ctx->pool_len; n++) {
                uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
                uintptr_t pool_end = pool_start + ctx->pool[n].len;

                if (buf_start > buf_end || pool_start > pool_end) {
                        ret = true; /* Wrapping buffers, shouldn't happen */
                        goto out;
                }

                /*
                 * The half-open intervals [buf_start, buf_end) and
                 * [pool_start, pool_end) overlap only when both
                 * conditions below hold.
                 */
                if (buf_end > pool_start && buf_start < pool_end) {
                        ret = true;
                        goto out;
                }
        }

out:
        malloc_unlock(ctx, exceptions);
        return ret;
}
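/*
 * Overlap illustration for the test above, with pool [0x1000, 0x2000):
 *
 *      buf [0x0f00, 0x1001)  ->  overlaps (last byte is in the pool)
 *      buf [0x2000, 0x2100)  ->  no overlap (upper bounds are exclusive)
 */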
size_t raw_malloc_get_ctx_size(void)
{
        return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
        memset(ctx, 0, sizeof(*ctx));
        ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
        ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
        gen_malloc_add_pool(ctx, buf, len);
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats)
{
        gen_malloc_get_stats(ctx, stats);
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
        gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
        return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
        return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = raw_malloc(0, 0, size, &nex_malloc_ctx);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void *nex_realloc(void *ptr, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void nex_free(void *ptr)
{
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
        malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
        return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
        return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
        return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
                        size_t size)
{
        return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
                                 size);
}

void nex_mdbg_check(int bufdump)
{
        gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
        malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
        gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
        return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
        return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
        gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
        gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif /* BufStats */

#endif /* CFG_VIRTUALIZATION */