// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */
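/*
 * Illustration only (guarded by the hypothetical macro
 * EXAMPLE_CHECK_SIZE_QUANT, so it is not built by default): the
 * power-of-two requirement above lets bget round sizes with bit
 * masking, e.g. with SizeQuant 16 a 5-byte request occupies one
 * 16-byte quantum. COMPILE_TIME_ASSERT() and IS_POWER_OF_TWO() come
 * from the headers included further down.
 */
#ifdef EXAMPLE_CHECK_SIZE_QUANT
static void example_check_size_quant(void)
{
	/* Fails to compile if SizeQuant is ever changed to a non-power-of-2 */
	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(SizeQuant));
}
#endif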
#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdlib_ext.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

#ifdef BufStats

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}
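/*
 * Usage sketch (guarded by the hypothetical macro EXAMPLE_STATS_USAGE,
 * not built by default): measuring the peak heap usage of a workload
 * with the statistics maintained above.
 */
#ifdef EXAMPLE_STATS_USAGE
static void example_measure_peak(void)
{
	struct malloc_stats stats = { };

	malloc_reset_stats();
	/* ... run the workload under test ... */
	malloc_get_stats(&stats);
	DMSG("peak heap usage: %zu bytes", (size_t)stats.max_allocated);
}
#endif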
#else /* BufStats */

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)		     \
	for (bpool_foreach_iterator_init((ctx), (iterator)); \
	     bpool_foreach((ctx), (iterator), (bp));)
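/*
 * Usage sketch (guarded by the hypothetical macro EXAMPLE_COUNT_ALLOCED,
 * not built by default): BPOOL_FOREACH visits every allocated buffer
 * across all registered pools, skipping free ones, e.g. to count live
 * allocations in a context.
 */
#ifdef EXAMPLE_COUNT_ALLOCED
static size_t example_count_alloced(struct malloc_ctx *ctx)
{
	struct bpool_iterator itr;
	void *b = NULL;
	size_t count = 0;

	BPOOL_FOREACH(ctx, &itr, &b)
		count++;

	return count;
}
#endif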
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
			  size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

static void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset, wipe);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
			size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, 0, 0, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}
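/*
 * Illustration (guarded by the hypothetical macro EXAMPLE_SIZE_CHECK,
 * not built by default): ADD_OVERFLOW()/MUL_OVERFLOW() from <util.h>
 * return true on wrap-around, which is why the raw_*() functions above
 * bail out instead of allocating a too-short buffer.
 */
#ifdef EXAMPLE_SIZE_CHECK
static bool example_total_size(size_t nmemb, size_t size, size_t *total)
{
	/* Rejects requests where nmemb * size would wrap past SIZE_MAX */
	return !MUL_OVERFLOW(nmemb, size, total);
}
#endif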
/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}
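/*
 * Illustration (guarded by the hypothetical macro EXAMPLE_MDBG_OVERHEAD,
 * not built by default): as implied by the functions above, an mdbg
 * allocation is laid out as
 *
 *   [struct mdbg_hdr][payload, pl_size bytes][pad to uint32_t][footer magic]
 *
 * so the per-allocation bookkeeping overhead is:
 */
#ifdef EXAMPLE_MDBG_OVERHEAD
static size_t example_mdbg_overhead(size_t pl_size)
{
	return sizeof(struct mdbg_hdr) + mdbg_get_ftr_size(pl_size);
}
#endif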
static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}
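/*
 * Usage sketch (guarded by the hypothetical macro EXAMPLE_ALIGNED_ALLOC,
 * not built by default): memalign() with a power-of-two alignment,
 * e.g. for a buffer that must start on a 64-byte boundary.
 */
#ifdef EXAMPLE_ALIGNED_ALLOC
static void *example_alloc_cacheline(size_t size)
{
	/* raw_memalign() returns NULL if alignment is not a power of two */
	return memalign(64, size);
}
#endif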
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif /* ENABLE_MDBG */

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
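/*
 * Usage sketch (guarded by the hypothetical macro EXAMPLE_WIPE_SECRET,
 * not built by default): free_wipe() overwrites the buffer contents
 * when releasing it, for data that must not linger in the heap.
 */
#ifdef EXAMPLE_WIPE_SECRET
static void example_release_key(void *key_buf)
{
	free_wipe(key_buf);
}
#endif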
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		/*
		 * [buf_start, buf_end) overlaps [pool_start, pool_end)
		 * only if both conditions hold, hence && and not ||.
		 */
		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif /* BufStats */

#endif /* CFG_VIRTUALIZATION */
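/*
 * Usage sketch (guarded by the hypothetical macro EXAMPLE_HEAP_SETUP,
 * not built by default): a heap is handed to the allocator once at
 * boot with malloc_add_pool(). The pool is rounded to SizeQuant
 * internally; the first pool must also meet
 * MALLOC_INITIAL_POOL_MIN_SIZE, which the size below is assumed to do.
 */
#ifdef EXAMPLE_HEAP_SETUP
static uint8_t example_heap[4096] __aligned(SizeQuant);

static void example_init_heap(void)
{
	malloc_add_pool(example_heap, sizeof(example_heap));
}
#endif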