// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
/* Memory debugging implies header/footer validation and free wiping */
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdlib_ext.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

/* Mark [buf, buf + len) as freed heap memory for ASan */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

/* Mark [buf, buf + len) as accessible (allocated) memory for ASan */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

/* memset() variant that bypasses ASan instrumentation */
static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

/* memcpy() variant that bypasses ASan instrumentation */
static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA: the ASan tag helpers become no-ops */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

/* One contiguous memory region handed to the allocator via *_add_pool() */
struct malloc_pool {
	void *buf;
	size_t len;
};

/*
 * State of one heap instance: the BGET pool set, the array of registered
 * pool regions, optional statistics and (in core) a spinlock protecting it.
 */
struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

/* Take the heap lock; returns the exception mask to restore on unlock */
static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

/* TAs are single threaded from the heap's point of view: locking is a no-op */
static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

/*
 * Statically define a heap context with an empty free list whose links
 * point back at itself, as BGET expects for an empty pool set.
 */
#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
/* Separate heap for nexus (non-virtualized, shared) data */
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

/* Report an out-of-memory condition (core only, when CFG_CORE_DUMP_OOM) */
static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

#ifdef BufStats

/*
 * Called after every raw allocation attempt: tracks the high-water mark
 * and records failure statistics when p is NULL.
 */
static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

/* Reset the running statistics (not the pool size) under the heap lock */
static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

/* Snapshot the statistics plus the current total allocated byte count */
static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

/* Without BufStats the hook only reports OOM */
static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);
}

#endif /* BufStats */

#ifdef BufValid
/* Run BGET's pool validator over every registered pool */
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

/* Cursor for walking all buffers (allocated and free) in every pool */
struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

/*
 * Advance the iterator within the current pool. Returns false when the
 * pool's end sentinel (ESent) is reached. On success *buf/*len describe
 * the payload and *isfree tells whether the buffer is on the free list.
 * A negative bsize marks an allocated buffer in BGET's encoding.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

/*
 * Yield the next *allocated* buffer across all pools, skipping free ones.
 * Returns false once every pool has been exhausted.
 */
static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp) \
		for (bpool_foreach_iterator_init((ctx),(iterator)); \
			bpool_foreach((ctx),(iterator), (bp));)

/*
 * Allocate pl_size bytes aligned to 'alignment' (must be a power of two),
 * reserving hdr_size bytes in front of and ftr_size bytes after the
 * payload. Returns NULL on bad alignment, overflow or exhaustion.
 */
void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

/* Release ptr back to BGET, optionally wiping the buffer contents */
void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset, wipe);
}

/*
 * Zero-initialized array allocation: pl_nmemb * pl_size payload bytes with
 * overflow-checked size arithmetic. Returns NULL on overflow or exhaustion.
 */
void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

/*
 * Resize ptr to hold hdr_size + pl_size + ftr_size bytes via bgetr().
 * On failure the original buffer is left untouched and NULL is returned.
 */
void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, 0, 0, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

/*
 * Debug header placed in front of every allocation: records where the
 * allocation was made and guards against overruns with magic values
 * (a matching footer magic is written after the payload).
 */
struct mdbg_hdr {
	const char *fname;	/* __FILE__ of the allocation site */
	uint16_t line;		/* __LINE__ of the allocation site */
	uint32_t pl_size;	/* requested payload size */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while live, 0 when freed */
#if defined(ARM64)
	uint64_t pad;		/* keep header a multiple of BGET_HDR_QUANTUM */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

/* Footer size: pad payload up to a uint32_t boundary plus the magic word */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

/* Return the address of the footer magic word for this allocation */
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

/* Fill in the debug header and write the footer magic */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

/* malloc() with debug header/footer; returns pointer to the payload */
static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* step past the header to the payload */
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

/* Panic (in debug builds) if the header or footer magic was corrupted */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

/* free() counterpart: validate magics, clear them, then release */
static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;	/* step back from payload to the debug header */
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

/* calloc() with debug header/footer */
static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname, int lineno,
		      size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(nmemb * size), nmemb, size,
			  ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

/* realloc() with debug header/footer; caller must hold the heap lock */
static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx, const char *fname,
				       int lineno, void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

/* memalign() with debug header/footer */
static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}


/* Map a raw BGET buffer to its payload pointer and recorded payload size */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

/*
 * Validate every live allocation's header/footer; if bufdump > 0 also
 * log each allocation with its size and origin.
 */
static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by macros
 * to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are referenced
 * by the application, either directly or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

/* Plain (non-debug) entry points: take the heap lock, call the raw layer */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

/* Shared body of free() and free_wipe(): wipe selects buffer scrubbing */
static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

/* realloc() body without locking, for callers already holding the lock */
static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}
756 757 void *memalign(size_t alignment, size_t size) 758 { 759 void *p; 760 uint32_t exceptions = malloc_lock(&malloc_ctx); 761 762 p = raw_memalign(0, 0, alignment, size, &malloc_ctx); 763 malloc_unlock(&malloc_ctx, exceptions); 764 return p; 765 } 766 767 static void *get_payload_start_size(void *ptr, size_t *size) 768 { 769 *size = bget_buf_size(ptr); 770 return ptr; 771 } 772 773 #endif 774 775 void free(void *ptr) 776 { 777 free_helper(ptr, false); 778 } 779 780 void free_wipe(void *ptr) 781 { 782 free_helper(ptr, true); 783 } 784 785 static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 786 { 787 void *p; 788 size_t l; 789 uint32_t exceptions; 790 uintptr_t start = (uintptr_t)buf; 791 uintptr_t end = start + len; 792 const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead); 793 794 start = ROUNDUP(start, SizeQuant); 795 end = ROUNDDOWN(end, SizeQuant); 796 797 if (start > end || (end - start) < min_len) { 798 DMSG("Skipping too small pool"); 799 return; 800 } 801 802 /* First pool requires a bigger size */ 803 if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) { 804 DMSG("Skipping too small initial pool"); 805 return; 806 } 807 808 exceptions = malloc_lock(ctx); 809 810 tag_asan_free((void *)start, end - start); 811 bpool((void *)start, end - start, &ctx->poolset); 812 l = ctx->pool_len + 1; 813 p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l); 814 assert(p); 815 ctx->pool = p; 816 ctx->pool[ctx->pool_len].buf = (void *)start; 817 ctx->pool[ctx->pool_len].len = end - start; 818 #ifdef BufStats 819 ctx->mstats.size += ctx->pool[ctx->pool_len].len; 820 #endif 821 ctx->pool_len = l; 822 malloc_unlock(ctx, exceptions); 823 } 824 825 static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx, 826 void *buf, size_t len) 827 { 828 struct bpool_iterator itr; 829 void *b; 830 uint8_t *start_buf = buf; 831 uint8_t *end_buf = start_buf + len; 832 bool ret = false; 833 uint32_t 
exceptions = malloc_lock(ctx); 834 835 raw_malloc_validate_pools(ctx); 836 837 /* Check for wrapping */ 838 if (start_buf > end_buf) 839 goto out; 840 841 BPOOL_FOREACH(ctx, &itr, &b) { 842 uint8_t *start_b; 843 uint8_t *end_b; 844 size_t s; 845 846 start_b = get_payload_start_size(b, &s); 847 end_b = start_b + s; 848 849 if (start_buf >= start_b && end_buf <= end_b) { 850 ret = true; 851 goto out; 852 } 853 } 854 855 out: 856 malloc_unlock(ctx, exceptions); 857 858 return ret; 859 } 860 861 static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx, 862 void *buf, size_t len) 863 { 864 uintptr_t buf_start = (uintptr_t) buf; 865 uintptr_t buf_end = buf_start + len; 866 size_t n; 867 bool ret = false; 868 uint32_t exceptions = malloc_lock(ctx); 869 870 raw_malloc_validate_pools(ctx); 871 872 for (n = 0; n < ctx->pool_len; n++) { 873 uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf; 874 uintptr_t pool_end = pool_start + ctx->pool[n].len; 875 876 if (buf_start > buf_end || pool_start > pool_end) { 877 ret = true; /* Wrapping buffers, shouldn't happen */ 878 goto out; 879 } 880 881 if (buf_end > pool_start || buf_start < pool_end) { 882 ret = true; 883 goto out; 884 } 885 } 886 887 out: 888 malloc_unlock(ctx, exceptions); 889 return ret; 890 } 891 892 size_t raw_malloc_get_ctx_size(void) 893 { 894 return sizeof(struct malloc_ctx); 895 } 896 897 void raw_malloc_init_ctx(struct malloc_ctx *ctx) 898 { 899 memset(ctx, 0, sizeof(*ctx)); 900 ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist; 901 ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist; 902 } 903 904 void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 905 { 906 gen_malloc_add_pool(ctx, buf, len); 907 } 908 909 #ifdef CFG_WITH_STATS 910 void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats) 911 { 912 gen_malloc_get_stats(ctx, stats); 913 } 914 #endif 915 916 void malloc_add_pool(void *buf, size_t len) 917 { 918 gen_malloc_add_pool(&malloc_ctx, buf, 
len); 919 } 920 921 bool malloc_buffer_is_within_alloced(void *buf, size_t len) 922 { 923 return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len); 924 } 925 926 bool malloc_buffer_overlaps_heap(void *buf, size_t len) 927 { 928 return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len); 929 } 930 931 #ifdef CFG_VIRTUALIZATION 932 933 #ifndef ENABLE_MDBG 934 935 void *nex_malloc(size_t size) 936 { 937 void *p; 938 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 939 940 p = raw_malloc(0, 0, size, &nex_malloc_ctx); 941 malloc_unlock(&nex_malloc_ctx, exceptions); 942 return p; 943 } 944 945 void *nex_calloc(size_t nmemb, size_t size) 946 { 947 void *p; 948 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 949 950 p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx); 951 malloc_unlock(&nex_malloc_ctx, exceptions); 952 return p; 953 } 954 955 void *nex_realloc(void *ptr, size_t size) 956 { 957 void *p; 958 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 959 960 p = realloc_unlocked(&nex_malloc_ctx, ptr, size); 961 malloc_unlock(&nex_malloc_ctx, exceptions); 962 return p; 963 } 964 965 void *nex_memalign(size_t alignment, size_t size) 966 { 967 void *p; 968 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 969 970 p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx); 971 malloc_unlock(&nex_malloc_ctx, exceptions); 972 return p; 973 } 974 975 void nex_free(void *ptr) 976 { 977 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 978 979 raw_free(ptr, &nex_malloc_ctx, false /* !wipe */); 980 malloc_unlock(&nex_malloc_ctx, exceptions); 981 } 982 983 #else /* ENABLE_MDBG */ 984 985 void *nex_mdbg_malloc(const char *fname, int lineno, size_t size) 986 { 987 return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size); 988 } 989 990 void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 991 { 992 return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size); 993 } 994 995 void *nex_mdbg_realloc(const char *fname, int 
lineno, void *ptr, size_t size) 996 { 997 return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size); 998 } 999 1000 void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment, 1001 size_t size) 1002 { 1003 return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment, size); 1004 } 1005 1006 void nex_mdbg_check(int bufdump) 1007 { 1008 gen_mdbg_check(&nex_malloc_ctx, bufdump); 1009 } 1010 1011 void nex_free(void *ptr) 1012 { 1013 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 1014 1015 gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */); 1016 malloc_unlock(&nex_malloc_ctx, exceptions); 1017 } 1018 1019 #endif /* ENABLE_MDBG */ 1020 1021 void nex_malloc_add_pool(void *buf, size_t len) 1022 { 1023 gen_malloc_add_pool(&nex_malloc_ctx, buf, len); 1024 } 1025 1026 bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len) 1027 { 1028 return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len); 1029 } 1030 1031 bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len) 1032 { 1033 return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len); 1034 } 1035 1036 #ifdef BufStats 1037 1038 void nex_malloc_reset_stats(void) 1039 { 1040 gen_malloc_reset_stats(&nex_malloc_ctx); 1041 } 1042 1043 void nex_malloc_get_stats(struct malloc_stats *stats) 1044 { 1045 gen_malloc_get_stats(&nex_malloc_ctx, stats); 1046 } 1047 1048 #endif 1049 1050 #endif 1051