1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2014, STMicroelectronics International N.V. 4 */ 5 6 #define PROTOTYPES 7 8 /* 9 * BGET CONFIGURATION 10 * ================== 11 */ 12 /* #define BGET_ENABLE_ALL_OPTIONS */ 13 #ifdef BGET_ENABLE_OPTION 14 #define TestProg 20000 /* Generate built-in test program 15 if defined. The value specifies 16 how many buffer allocation attempts 17 the test program should make. */ 18 #endif 19 20 21 #ifdef __LP64__ 22 #define SizeQuant 16 23 #endif 24 #ifdef __ILP32__ 25 #define SizeQuant 8 26 #endif 27 /* Buffer allocation size quantum: 28 all buffers allocated are a 29 multiple of this size. This 30 MUST be a power of two. */ 31 32 #ifdef BGET_ENABLE_OPTION 33 #define BufDump 1 /* Define this symbol to enable the 34 bpoold() function which dumps the 35 buffers in a buffer pool. */ 36 37 #define BufValid 1 /* Define this symbol to enable the 38 bpoolv() function for validating 39 a buffer pool. */ 40 41 #define DumpData 1 /* Define this symbol to enable the 42 bufdump() function which allows 43 dumping the contents of an allocated 44 or free buffer. */ 45 46 #define BufStats 1 /* Define this symbol to enable the 47 bstats() function which calculates 48 the total free space in the buffer 49 pool, the largest available 50 buffer, and the total space 51 currently allocated. */ 52 53 #define FreeWipe 1 /* Wipe free buffers to a guaranteed 54 pattern of garbage to trip up 55 miscreants who attempt to use 56 pointers into released buffers. */ 57 58 #define BestFit 1 /* Use a best fit algorithm when 59 searching for space for an 60 allocation request. This uses 61 memory more efficiently, but 62 allocation will be much slower. */ 63 64 #define BECtl 1 /* Define this symbol to enable the 65 bectl() function for automatic 66 pool space control. */ 67 #endif 68 69 #ifdef MEM_DEBUG 70 #undef NDEBUG 71 #define DumpData 1 72 #define BufValid 1 73 #define FreeWipe 1 74 #endif 75 76 #ifdef CFG_WITH_STATS 77 #define BufStats 1 78 #endif 79 80 #include <compiler.h> 81 #include <malloc.h> 82 #include <stdbool.h> 83 #include <stdint.h> 84 #include <stdlib.h> 85 #include <stdlib_ext.h> 86 #include <string.h> 87 #include <trace.h> 88 #include <util.h> 89 90 #if defined(__KERNEL__) 91 /* Compiling for TEE Core */ 92 #include <kernel/asan.h> 93 #include <kernel/thread.h> 94 #include <kernel/spinlock.h> 95 96 static void tag_asan_free(void *buf, size_t len) 97 { 98 asan_tag_heap_free(buf, (uint8_t *)buf + len); 99 } 100 101 static void tag_asan_alloced(void *buf, size_t len) 102 { 103 asan_tag_access(buf, (uint8_t *)buf + len); 104 } 105 106 static void *memset_unchecked(void *s, int c, size_t n) 107 { 108 return asan_memset_unchecked(s, c, n); 109 } 110 111 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src, 112 size_t n) 113 { 114 return asan_memcpy_unchecked(dst, src, n); 115 } 116 117 #else /*__KERNEL__*/ 118 /* Compiling for TA */ 119 120 static void tag_asan_free(void *buf __unused, size_t len __unused) 121 { 122 } 123 124 static void tag_asan_alloced(void *buf __unused, size_t len __unused) 125 { 126 } 127 128 static void *memset_unchecked(void *s, int c, size_t n) 129 { 130 return memset(s, c, n); 131 } 132 133 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src, 134 size_t n) 135 { 136 return memcpy(dst, src, n); 137 } 138 139 #endif /*__KERNEL__*/ 140 141 #include "bget.c" /* this is ugly, but this is bget */ 142 143 struct malloc_pool { 144 void *buf; 145 size_t len; 146 }; 147 148 struct malloc_ctx { 149 struct bpoolset poolset; 150 struct malloc_pool *pool; 151 size_t pool_len; 152 #ifdef BufStats 153 struct malloc_stats mstats; 154 #endif 155 #ifdef __KERNEL__ 156 unsigned int spinlock; 157 #endif 158 }; 159 160 #ifdef __KERNEL__ 161 162 static uint32_t malloc_lock(struct malloc_ctx *ctx) 163 { 164 return cpu_spin_lock_xsave(&ctx->spinlock); 165 } 166 167 static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions) 168 { 169 cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions); 170 } 171 172 #else /* __KERNEL__ */ 173 174 static uint32_t malloc_lock(struct malloc_ctx *ctx __unused) 175 { 176 return 0; 177 } 178 179 static void malloc_unlock(struct malloc_ctx *ctx __unused, 180 uint32_t exceptions __unused) 181 { 182 } 183 184 #endif /* __KERNEL__ */ 185 186 #define DEFINE_CTX(name) struct malloc_ctx name = \ 187 { .poolset = { .freelist = { {0, 0}, \ 188 {&name.poolset.freelist, \ 189 &name.poolset.freelist}}}} 190 191 static DEFINE_CTX(malloc_ctx); 192 193 #ifdef CFG_VIRTUALIZATION 194 static __nex_data DEFINE_CTX(nex_malloc_ctx); 195 #endif 196 197 static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused) 198 { 199 #if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM) 200 EMSG("Memory allocation failed: size %zu context %p", req_size, ctx); 201 EPRINT_STACK(); 202 #endif 203 } 204 205 #ifdef BufStats 206 207 static void raw_malloc_return_hook(void *p, size_t requested_size, 208 struct malloc_ctx *ctx) 209 { 210 if (ctx->poolset.totalloc > ctx->mstats.max_allocated) 211 ctx->mstats.max_allocated = ctx->poolset.totalloc; 212 213 if (!p) { 214 ctx->mstats.num_alloc_fail++; 215 print_oom(requested_size, ctx); 216 if (requested_size > ctx->mstats.biggest_alloc_fail) { 217 ctx->mstats.biggest_alloc_fail = requested_size; 218 ctx->mstats.biggest_alloc_fail_used = 219 ctx->poolset.totalloc; 220 } 221 } 222 } 223 224 static void gen_malloc_reset_stats(struct malloc_ctx *ctx) 225 { 226 uint32_t exceptions = malloc_lock(ctx); 227 228 ctx->mstats.max_allocated = 0; 229 ctx->mstats.num_alloc_fail = 0; 230 ctx->mstats.biggest_alloc_fail = 0; 231 ctx->mstats.biggest_alloc_fail_used = 0; 232 malloc_unlock(ctx, exceptions); 233 } 234 235 void malloc_reset_stats(void) 236 { 237 gen_malloc_reset_stats(&malloc_ctx); 238 } 239 240 static void gen_malloc_get_stats(struct malloc_ctx *ctx, 241 struct malloc_stats *stats) 242 { 243 uint32_t exceptions = malloc_lock(ctx); 244 245 memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats)); 246 stats->allocated = ctx->poolset.totalloc; 247 malloc_unlock(ctx, exceptions); 248 } 249 250 void malloc_get_stats(struct malloc_stats *stats) 251 { 252 gen_malloc_get_stats(&malloc_ctx, stats); 253 } 254 255 #else /* BufStats */ 256 257 static void raw_malloc_return_hook(void *p, size_t requested_size, 258 struct malloc_ctx *ctx ) 259 { 260 if (!p) 261 print_oom(requested_size, ctx); 262 } 263 264 #endif /* BufStats */ 265 266 #ifdef BufValid 267 static void raw_malloc_validate_pools(struct malloc_ctx *ctx) 268 { 269 size_t n; 270 271 for (n = 0; n < ctx->pool_len; n++) 272 bpoolv(ctx->pool[n].buf); 273 } 274 #else 275 static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused) 276 { 277 } 278 #endif 279 280 struct bpool_iterator { 281 struct bfhead *next_buf; 282 size_t pool_idx; 283 }; 284 285 static void bpool_foreach_iterator_init(struct malloc_ctx *ctx, 286 struct bpool_iterator *iterator) 287 { 288 iterator->pool_idx = 0; 289 iterator->next_buf = BFH(ctx->pool[0].buf); 290 } 291 292 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf, 293 size_t *len, bool *isfree) 294 { 295 struct bfhead *b = iterator->next_buf; 296 bufsize bs = b->bh.bsize; 297 298 if (bs == ESent) 299 return false; 300 301 if (bs < 0) { 302 /* Allocated buffer */ 303 bs = -bs; 304 305 *isfree = false; 306 } else { 307 /* Free Buffer */ 308 *isfree = true; 309 310 /* Assert that the free list links are intact */ 311 assert(b->ql.blink->ql.flink == b); 312 assert(b->ql.flink->ql.blink == b); 313 } 314 315 *buf = (uint8_t *)b + sizeof(struct bhead); 316 *len = bs - sizeof(struct bhead); 317 318 iterator->next_buf = BFH((uint8_t *)b + bs); 319 return true; 320 } 321 322 static bool bpool_foreach(struct malloc_ctx *ctx, 323 struct bpool_iterator *iterator, void **buf) 324 { 325 while (true) { 326 size_t len; 327 bool isfree; 328 329 if (bpool_foreach_pool(iterator, buf, &len, &isfree)) { 330 if (isfree) 331 continue; 332 return true; 333 } 334 335 if ((iterator->pool_idx + 1) >= ctx->pool_len) 336 return false; 337 338 iterator->pool_idx++; 339 iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf); 340 } 341 } 342 343 /* Convenience macro for looping over all allocated buffers */ 344 #define BPOOL_FOREACH(ctx, iterator, bp) \ 345 for (bpool_foreach_iterator_init((ctx),(iterator)); \ 346 bpool_foreach((ctx),(iterator), (bp));) 347 348 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size, 349 struct malloc_ctx *ctx) 350 { 351 void *ptr = NULL; 352 bufsize s; 353 354 /* 355 * Make sure that malloc has correct alignment of returned buffers. 356 * The assumption is that uintptr_t will be as wide as the largest 357 * required alignment of any type. 358 */ 359 COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t)); 360 361 raw_malloc_validate_pools(ctx); 362 363 /* Compute total size */ 364 if (ADD_OVERFLOW(pl_size, hdr_size, &s)) 365 goto out; 366 if (ADD_OVERFLOW(s, ftr_size, &s)) 367 goto out; 368 369 /* BGET doesn't like 0 sized allocations */ 370 if (!s) 371 s++; 372 373 ptr = bget(s, &ctx->poolset); 374 out: 375 raw_malloc_return_hook(ptr, pl_size, ctx); 376 377 return ptr; 378 } 379 380 static void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe) 381 { 382 raw_malloc_validate_pools(ctx); 383 384 if (ptr) 385 brel(ptr, &ctx->poolset, wipe); 386 } 387 388 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb, 389 size_t pl_size, struct malloc_ctx *ctx) 390 { 391 void *ptr = NULL; 392 bufsize s; 393 394 raw_malloc_validate_pools(ctx); 395 396 /* Compute total size */ 397 if (MUL_OVERFLOW(pl_nmemb, pl_size, &s)) 398 goto out; 399 if (ADD_OVERFLOW(s, hdr_size, &s)) 400 goto out; 401 if (ADD_OVERFLOW(s, ftr_size, &s)) 402 goto out; 403 404 /* BGET doesn't like 0 sized allocations */ 405 if (!s) 406 s++; 407 408 ptr = bgetz(s, &ctx->poolset); 409 out: 410 raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx); 411 412 return ptr; 413 } 414 415 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size, 416 size_t pl_size, struct malloc_ctx *ctx) 417 { 418 void *p = NULL; 419 bufsize s; 420 421 /* Compute total size */ 422 if (ADD_OVERFLOW(pl_size, hdr_size, &s)) 423 goto out; 424 if (ADD_OVERFLOW(s, ftr_size, &s)) 425 goto out; 426 427 raw_malloc_validate_pools(ctx); 428 429 /* BGET doesn't like 0 sized allocations */ 430 if (!s) 431 s++; 432 433 p = bgetr(ptr, s, &ctx->poolset); 434 out: 435 raw_malloc_return_hook(p, pl_size, ctx); 436 437 return p; 438 } 439 440 /* Most of the stuff in this function is copied from bgetr() in bget.c */ 441 static __maybe_unused bufsize bget_buf_size(void *buf) 442 { 443 bufsize osize; /* Old size of buffer */ 444 struct bhead *b; 445 446 b = BH(((char *)buf) - sizeof(struct bhead)); 447 osize = -b->bsize; 448 #ifdef BECtl 449 if (osize == 0) { 450 /* Buffer acquired directly through acqfcn. */ 451 struct bdhead *bd; 452 453 bd = BDH(((char *)buf) - sizeof(struct bdhead)); 454 osize = bd->tsize - sizeof(struct bdhead); 455 } else 456 #endif 457 osize -= sizeof(struct bhead); 458 assert(osize > 0); 459 return osize; 460 } 461 462 #ifdef ENABLE_MDBG 463 464 struct mdbg_hdr { 465 const char *fname; 466 uint16_t line; 467 uint32_t pl_size; 468 uint32_t magic; 469 #if defined(ARM64) 470 uint64_t pad; 471 #endif 472 }; 473 474 #define MDBG_HEADER_MAGIC 0xadadadad 475 #define MDBG_FOOTER_MAGIC 0xecececec 476 477 static size_t mdbg_get_ftr_size(size_t pl_size) 478 { 479 size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size; 480 481 return ftr_pad + sizeof(uint32_t); 482 } 483 484 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr) 485 { 486 uint32_t *footer; 487 488 footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size + 489 mdbg_get_ftr_size(hdr->pl_size)); 490 footer--; 491 return footer; 492 } 493 494 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname, 495 int lineno, size_t pl_size) 496 { 497 uint32_t *footer; 498 499 hdr->fname = fname; 500 hdr->line = lineno; 501 hdr->pl_size = pl_size; 502 hdr->magic = MDBG_HEADER_MAGIC; 503 504 footer = mdbg_get_footer(hdr); 505 *footer = MDBG_FOOTER_MAGIC; 506 } 507 508 static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname, 509 int lineno, size_t size) 510 { 511 struct mdbg_hdr *hdr; 512 uint32_t exceptions = malloc_lock(ctx); 513 514 /* 515 * Check struct mdbg_hdr doesn't get bad alignment. 516 * This is required by C standard: the buffer returned from 517 * malloc() should be aligned with a fundamental alignment. 518 * For ARM32, the required alignment is 8. For ARM64, it is 16. 519 */ 520 COMPILE_TIME_ASSERT( 521 (sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0); 522 523 hdr = raw_malloc(sizeof(struct mdbg_hdr), 524 mdbg_get_ftr_size(size), size, ctx); 525 if (hdr) { 526 mdbg_update_hdr(hdr, fname, lineno, size); 527 hdr++; 528 } 529 530 malloc_unlock(ctx, exceptions); 531 return hdr; 532 } 533 534 static void assert_header(struct mdbg_hdr *hdr __maybe_unused) 535 { 536 assert(hdr->magic == MDBG_HEADER_MAGIC); 537 assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC); 538 } 539 540 static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe) 541 { 542 struct mdbg_hdr *hdr = ptr; 543 544 if (hdr) { 545 hdr--; 546 assert_header(hdr); 547 hdr->magic = 0; 548 *mdbg_get_footer(hdr) = 0; 549 raw_free(hdr, ctx, wipe); 550 } 551 } 552 553 static void free_helper(void *ptr, bool wipe) 554 { 555 uint32_t exceptions = malloc_lock(&malloc_ctx); 556 557 gen_mdbg_free(&malloc_ctx, ptr, wipe); 558 malloc_unlock(&malloc_ctx, exceptions); 559 } 560 561 static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname, int lineno, 562 size_t nmemb, size_t size) 563 { 564 struct mdbg_hdr *hdr; 565 uint32_t exceptions = malloc_lock(ctx); 566 567 hdr = raw_calloc(sizeof(struct mdbg_hdr), 568 mdbg_get_ftr_size(nmemb * size), nmemb, size, 569 ctx); 570 if (hdr) { 571 mdbg_update_hdr(hdr, fname, lineno, nmemb * size); 572 hdr++; 573 } 574 malloc_unlock(ctx, exceptions); 575 return hdr; 576 } 577 578 static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx, const char *fname, 579 int lineno, void *ptr, size_t size) 580 { 581 struct mdbg_hdr *hdr = ptr; 582 583 if (hdr) { 584 hdr--; 585 assert_header(hdr); 586 } 587 hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr), 588 mdbg_get_ftr_size(size), size, ctx); 589 if (hdr) { 590 mdbg_update_hdr(hdr, fname, lineno, size); 591 hdr++; 592 } 593 return hdr; 594 } 595 596 static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname, 597 int lineno, void *ptr, size_t size) 598 { 599 void *p; 600 uint32_t exceptions = malloc_lock(ctx); 601 602 p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size); 603 malloc_unlock(ctx, exceptions); 604 return p; 605 } 606 607 #define realloc_unlocked(ctx, ptr, size) \ 608 gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size)) 609 610 static void *get_payload_start_size(void *raw_buf, size_t *size) 611 { 612 struct mdbg_hdr *hdr = raw_buf; 613 614 assert(bget_buf_size(hdr) >= hdr->pl_size); 615 *size = hdr->pl_size; 616 return hdr + 1; 617 } 618 619 static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump) 620 { 621 struct bpool_iterator itr; 622 void *b; 623 uint32_t exceptions = malloc_lock(ctx); 624 625 raw_malloc_validate_pools(ctx); 626 627 BPOOL_FOREACH(ctx, &itr, &b) { 628 struct mdbg_hdr *hdr = (struct mdbg_hdr *)b; 629 630 assert_header(hdr); 631 632 if (bufdump > 0) { 633 const char *fname = hdr->fname; 634 635 if (!fname) 636 fname = "unknown"; 637 638 IMSG("buffer: %d bytes %s:%d\n", 639 hdr->pl_size, fname, hdr->line); 640 } 641 } 642 643 malloc_unlock(ctx, exceptions); 644 } 645 646 void *mdbg_malloc(const char *fname, int lineno, size_t size) 647 { 648 return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size); 649 } 650 651 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 652 { 653 return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size); 654 } 655 656 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 657 { 658 return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size); 659 } 660 661 void mdbg_check(int bufdump) 662 { 663 gen_mdbg_check(&malloc_ctx, bufdump); 664 } 665 #else 666 667 void *malloc(size_t size) 668 { 669 void *p; 670 uint32_t exceptions = malloc_lock(&malloc_ctx); 671 672 p = raw_malloc(0, 0, size, &malloc_ctx); 673 malloc_unlock(&malloc_ctx, exceptions); 674 return p; 675 } 676 677 static void free_helper(void *ptr, bool wipe) 678 { 679 uint32_t exceptions = malloc_lock(&malloc_ctx); 680 681 raw_free(ptr, &malloc_ctx, wipe); 682 malloc_unlock(&malloc_ctx, exceptions); 683 } 684 685 void *calloc(size_t nmemb, size_t size) 686 { 687 void *p; 688 uint32_t exceptions = malloc_lock(&malloc_ctx); 689 690 p = raw_calloc(0, 0, nmemb, size, &malloc_ctx); 691 malloc_unlock(&malloc_ctx, exceptions); 692 return p; 693 } 694 695 static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr, 696 size_t size) 697 { 698 return raw_realloc(ptr, 0, 0, size, ctx); 699 } 700 701 void *realloc(void *ptr, size_t size) 702 { 703 void *p; 704 uint32_t exceptions = malloc_lock(&malloc_ctx); 705 706 p = realloc_unlocked(&malloc_ctx, ptr, size); 707 malloc_unlock(&malloc_ctx, exceptions); 708 return p; 709 } 710 711 static void *get_payload_start_size(void *ptr, size_t *size) 712 { 713 *size = bget_buf_size(ptr); 714 return ptr; 715 } 716 717 #endif 718 719 void free(void *ptr) 720 { 721 free_helper(ptr, false); 722 } 723 724 void free_wipe(void *ptr) 725 { 726 free_helper(ptr, true); 727 } 728 729 static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 730 { 731 void *p; 732 size_t l; 733 uint32_t exceptions; 734 uintptr_t start = (uintptr_t)buf; 735 uintptr_t end = start + len; 736 const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) & 737 (~(SizeQuant - 1))) + 738 sizeof(struct bhead) * 2; 739 740 741 start = ROUNDUP(start, SizeQuant); 742 end = ROUNDDOWN(end, SizeQuant); 743 assert(start < end); 744 745 if ((end - start) < min_len) { 746 DMSG("Skipping too small pool"); 747 return; 748 } 749 750 exceptions = malloc_lock(ctx); 751 752 tag_asan_free((void *)start, end - start); 753 bpool((void *)start, end - start, &ctx->poolset); 754 l = ctx->pool_len + 1; 755 p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l); 756 assert(p); 757 ctx->pool = p; 758 ctx->pool[ctx->pool_len].buf = (void *)start; 759 ctx->pool[ctx->pool_len].len = end - start; 760 #ifdef BufStats 761 ctx->mstats.size += ctx->pool[ctx->pool_len].len; 762 #endif 763 ctx->pool_len = l; 764 malloc_unlock(ctx, exceptions); 765 } 766 767 static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx, 768 void *buf, size_t len) 769 { 770 struct bpool_iterator itr; 771 void *b; 772 uint8_t *start_buf = buf; 773 uint8_t *end_buf = start_buf + len; 774 bool ret = false; 775 uint32_t exceptions = malloc_lock(ctx); 776 777 raw_malloc_validate_pools(ctx); 778 779 /* Check for wrapping */ 780 if (start_buf > end_buf) 781 goto out; 782 783 BPOOL_FOREACH(ctx, &itr, &b) { 784 uint8_t *start_b; 785 uint8_t *end_b; 786 size_t s; 787 788 start_b = get_payload_start_size(b, &s); 789 end_b = start_b + s; 790 791 if (start_buf >= start_b && end_buf <= end_b) { 792 ret = true; 793 goto out; 794 } 795 } 796 797 out: 798 malloc_unlock(ctx, exceptions); 799 800 return ret; 801 } 802 803 static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx, 804 void *buf, size_t len) 805 { 806 uintptr_t buf_start = (uintptr_t) buf; 807 uintptr_t buf_end = buf_start + len; 808 size_t n; 809 bool ret = false; 810 uint32_t exceptions = malloc_lock(ctx); 811 812 raw_malloc_validate_pools(ctx); 813 814 for (n = 0; n < ctx->pool_len; n++) { 815 uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf; 816 uintptr_t pool_end = pool_start + ctx->pool[n].len; 817 818 if (buf_start > buf_end || pool_start > pool_end) { 819 ret = true; /* Wrapping buffers, shouldn't happen */ 820 goto out; 821 } 822 823 if (buf_end > pool_start || buf_start < pool_end) { 824 ret = true; 825 goto out; 826 } 827 } 828 829 out: 830 malloc_unlock(ctx, exceptions); 831 return ret; 832 } 833 834 void malloc_add_pool(void *buf, size_t len) 835 { 836 gen_malloc_add_pool(&malloc_ctx, buf, len); 837 } 838 839 bool malloc_buffer_is_within_alloced(void *buf, size_t len) 840 { 841 return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len); 842 } 843 844 bool malloc_buffer_overlaps_heap(void *buf, size_t len) 845 { 846 return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len); 847 } 848 849 #ifdef CFG_VIRTUALIZATION 850 851 #ifndef ENABLE_MDBG 852 853 void *nex_malloc(size_t size) 854 { 855 void *p; 856 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 857 858 p = raw_malloc(0, 0, size, &nex_malloc_ctx); 859 malloc_unlock(&nex_malloc_ctx, exceptions); 860 return p; 861 } 862 863 void *nex_calloc(size_t nmemb, size_t size) 864 { 865 void *p; 866 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 867 868 p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx); 869 malloc_unlock(&nex_malloc_ctx, exceptions); 870 return p; 871 } 872 873 void *nex_realloc(void *ptr, size_t size) 874 { 875 void *p; 876 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 877 878 p = realloc_unlocked(&nex_malloc_ctx, ptr, size); 879 malloc_unlock(&nex_malloc_ctx, exceptions); 880 return p; 881 } 882 883 void nex_free(void *ptr) 884 { 885 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 886 887 raw_free(ptr, &nex_malloc_ctx, false /* !wipe */); 888 malloc_unlock(&nex_malloc_ctx, exceptions); 889 } 890 891 #else /* ENABLE_MDBG */ 892 893 void *nex_mdbg_malloc(const char *fname, int lineno, size_t size) 894 { 895 return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size); 896 } 897 898 void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 899 { 900 return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size); 901 } 902 903 void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 904 { 905 return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size); 906 } 907 908 void nex_mdbg_check(int bufdump) 909 { 910 gen_mdbg_check(&nex_malloc_ctx, bufdump); 911 } 912 913 void nex_free(void *ptr) 914 { 915 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 916 917 gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */); 918 malloc_unlock(&nex_malloc_ctx, exceptions); 919 } 920 921 #endif /* ENABLE_MDBG */ 922 923 void nex_malloc_add_pool(void *buf, size_t len) 924 { 925 gen_malloc_add_pool(&nex_malloc_ctx, buf, len); 926 } 927 928 bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len) 929 { 930 return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len); 931 } 932 933 bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len) 934 { 935 return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len); 936 } 937 938 #ifdef BufStats 939 940 void nex_malloc_reset_stats(void) 941 { 942 gen_malloc_reset_stats(&nex_malloc_ctx); 943 } 944 945 void nex_malloc_get_stats(struct malloc_stats *stats) 946 { 947 gen_malloc_get_stats(&nex_malloc_ctx, stats); 948 } 949 950 #endif 951 952 #endif 953