// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_ALL_OPTIONS
#define TestProg 20000		/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_ALL_OPTIONS
#define BufDump 1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid 1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData 1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats 1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe 1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit 1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl 1			/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif
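/*
 * Note on SizeQuant: bget() rounds every request up to a multiple of
 * SizeQuant before adding its struct bhead bookkeeping header, so with
 * SizeQuant == 16 (LP64) even a 1-byte allocation consumes a full
 * 16-byte quantum plus the header.
 */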
#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdlib_ext.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
				     {&name.poolset.freelist,	\
				      &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif
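/*
 * DEFINE_CTX statically initializes the bget free list as an empty
 * circular doubly-linked list: the sentinel's forward and backward links
 * both point back at the sentinel itself, so bget() finds no free space
 * until malloc_add_pool() donates memory through bpool().
 */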
static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

#ifdef BufStats

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)			\
	for (bpool_foreach_iterator_init((ctx), (iterator));	\
	     bpool_foreach((ctx), (iterator), (bp));)
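/*
 * Usage sketch for BPOOL_FOREACH (this is how gen_mdbg_check() and
 * gen_malloc_buffer_is_within_alloced() below use it): "bp" receives a
 * pointer to the payload of each allocated, non-free buffer in turn.
 *
 *	struct bpool_iterator itr;
 *	void *b = NULL;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		inspect(b);	(inspect() being a hypothetical callback)
 */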
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

static void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset, wipe);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
			size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}
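/*
 * The raw_*() helpers above reserve room for an optional caller-supplied
 * header and footer around the payload: total = hdr_size + pl_size +
 * ftr_size, with every step overflow-checked. The mdbg wrappers below
 * rely on this, passing sizeof(struct mdbg_hdr) as the header size and
 * mdbg_get_ftr_size(payload) as the footer size.
 */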
/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
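/*
 * Resulting layout of an mdbg allocation, as built by gen_mdbg_malloc()
 * and stamped by mdbg_update_hdr():
 *
 *	[struct mdbg_hdr][payload (pl_size bytes)][pad][uint32_t footer]
 *
 * The pad rounds the payload up to a 4-byte boundary so the footer magic
 * is naturally aligned; mdbg_get_footer() returns the last 32-bit word
 * of the footer area.
 */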
static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %u bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#else

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
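/*
 * free_wipe() is intended for buffers that held sensitive data: the wipe
 * flag makes brel() scrub the buffer contents before linking it back
 * into the free list, e.g.:
 *
 *	uint8_t *key = malloc(key_len);
 *
 *	...use key...
 *	free_wipe(key);
 */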
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = ((sizeof(struct malloc_pool) +
				 (SizeQuant - 1)) & (~(SizeQuant - 1))) +
			       sizeof(struct bhead) * 2;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		/*
		 * The buffer overlaps the pool only if it ends after the
		 * pool starts and starts before the pool ends; a logical
		 * OR here would flag every buffer as overlapping.
		 */
		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}
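/*
 * Pool registration sketch (hypothetical carve-out; gen_malloc_add_pool()
 * aligns start/end to SizeQuant and skips, with a debug trace, pools
 * smaller than min_len):
 *
 *	static uint8_t heap_buf[64 * 1024];
 *
 *	malloc_add_pool(heap_buf, sizeof(heap_buf));
 *
 * With CFG_VIRTUALIZATION the nex_* variants below provide the same API
 * backed by nex_malloc_ctx, the nexus heap used by code that runs
 * outside any single guest partition.
 */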
#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif