// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_OPTION */
#ifdef BGET_ENABLE_OPTION
#define TestProg 20000          /* Generate built-in test program
                                   if defined. The value specifies
                                   how many buffer allocation attempts
                                   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
                                /* Buffer allocation size quantum:
                                   all buffers allocated are a
                                   multiple of this size. This
                                   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump 1               /* Define this symbol to enable the
                                   bpoold() function which dumps the
                                   buffers in a buffer pool. */

#define BufValid 1              /* Define this symbol to enable the
                                   bpoolv() function for validating
                                   a buffer pool. */

#define DumpData 1              /* Define this symbol to enable the
                                   bufdump() function which allows
                                   dumping the contents of an allocated
                                   or free buffer. */

#define BufStats 1              /* Define this symbol to enable the
                                   bstats() function which calculates
                                   the total free space in the buffer
                                   pool, the largest available
                                   buffer, and the total space
                                   currently allocated. */

#define FreeWipe 1              /* Wipe free buffers to a guaranteed
                                   pattern of garbage to trip up
                                   miscreants who attempt to use
                                   pointers into released buffers. */

#define BestFit 1               /* Use a best fit algorithm when
                                   searching for space for an
                                   allocation request. This uses
                                   memory more efficiently, but
                                   allocation will be much slower. */

#define BECtl 1                 /* Define this symbol to enable the
                                   bectl() function for automatic
                                   pool space control. */
#endif
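
/*
 * Note, illustrative only: with the configuration above, every allocation
 * is rounded up to a multiple of SizeQuant (16 bytes on LP64 builds,
 * 8 bytes on ILP32 builds). For example, a 20-byte request on an LP64
 * build occupies a 32-byte quantum-rounded area in addition to BGET's
 * per-buffer bookkeeping header.
 */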
#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdlib_ext.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void tag_asan_free(void *buf, size_t len)
{
        asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
        asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
        return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
                                             size_t n)
{
        return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
        return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
                                             size_t n)
{
        return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"               /* this is ugly, but this is bget */

struct malloc_pool {
        void *buf;
        size_t len;
};

struct malloc_ctx {
        struct bpoolset poolset;
        struct malloc_pool *pool;
        size_t pool_len;
#ifdef BufStats
        struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
        unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
        return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
        cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
        return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
                          uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =              \
        { .poolset = { .freelist = { {0, 0},                   \
                        {&name.poolset.freelist,               \
                         &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
        EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
        print_kernel_stack();
#endif
}
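
/*
 * Heap statistics (BufStats builds). raw_malloc_return_hook() below keeps
 * the bookkeeping: it tracks the high-water mark of allocated bytes and,
 * for failed requests, the largest request that ever failed together with
 * how much of the heap was in use at that moment. As used in this file,
 * the struct malloc_stats fields mean (see <malloc.h> for the
 * authoritative definition):
 *
 *      size                    total bytes added with malloc_add_pool()
 *      allocated               bytes currently allocated
 *      max_allocated           high-water mark of "allocated"
 *      num_alloc_fail          number of failed requests
 *      biggest_alloc_fail      largest failed request, in bytes
 *      biggest_alloc_fail_used bytes allocated when that failure occurred
 */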
#ifdef BufStats

static void raw_malloc_return_hook(void *p, size_t requested_size,
                                   struct malloc_ctx *ctx)
{
        if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
                ctx->mstats.max_allocated = ctx->poolset.totalloc;

        if (!p) {
                ctx->mstats.num_alloc_fail++;
                print_oom(requested_size, ctx);
                if (requested_size > ctx->mstats.biggest_alloc_fail) {
                        ctx->mstats.biggest_alloc_fail = requested_size;
                        ctx->mstats.biggest_alloc_fail_used =
                                ctx->poolset.totalloc;
                }
        }
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
        uint32_t exceptions = malloc_lock(ctx);

        ctx->mstats.max_allocated = 0;
        ctx->mstats.num_alloc_fail = 0;
        ctx->mstats.biggest_alloc_fail = 0;
        ctx->mstats.biggest_alloc_fail_used = 0;
        malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
        gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
                                 struct malloc_stats *stats)
{
        uint32_t exceptions = malloc_lock(ctx);

        memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
        stats->allocated = ctx->poolset.totalloc;
        malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
        gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p, size_t requested_size,
                                   struct malloc_ctx *ctx)
{
        if (!p)
                print_oom(requested_size, ctx);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
        size_t n;

        for (n = 0; n < ctx->pool_len; n++)
                bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
        struct bfhead *next_buf;
        size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
                                        struct bpool_iterator *iterator)
{
        iterator->pool_idx = 0;
        iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
                               size_t *len, bool *isfree)
{
        struct bfhead *b = iterator->next_buf;
        bufsize bs = b->bh.bsize;

        if (bs == ESent)
                return false;

        if (bs < 0) {
                /* Allocated buffer */
                bs = -bs;

                *isfree = false;
        } else {
                /* Free Buffer */
                *isfree = true;

                /* Assert that the free list links are intact */
                assert(b->ql.blink->ql.flink == b);
                assert(b->ql.flink->ql.blink == b);
        }

        *buf = (uint8_t *)b + sizeof(struct bhead);
        *len = bs - sizeof(struct bhead);

        iterator->next_buf = BFH((uint8_t *)b + bs);
        return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
                          struct bpool_iterator *iterator, void **buf)
{
        while (true) {
                size_t len;
                bool isfree;

                if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
                        if (isfree)
                                continue;
                        return true;
                }

                if ((iterator->pool_idx + 1) >= ctx->pool_len)
                        return false;

                iterator->pool_idx++;
                iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
        }
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)                     \
        for (bpool_foreach_iterator_init((ctx), (iterator)); \
             bpool_foreach((ctx), (iterator), (bp));)
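
/*
 * Usage sketch for the iterator above (illustrative only, this helper is
 * hypothetical and not part of this file): count the allocated buffers in
 * the default heap.
 *
 *      static size_t count_allocated(void)
 *      {
 *              struct bpool_iterator itr;
 *              void *b = NULL;
 *              size_t count = 0;
 *
 *              BPOOL_FOREACH(&malloc_ctx, &itr, &b)
 *                      count++;
 *
 *              return count;
 *      }
 *
 * The caller is expected to hold the malloc lock while iterating, as in
 * gen_mdbg_check() and gen_malloc_buffer_is_within_alloced() below.
 */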
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
                        struct malloc_ctx *ctx)
{
        void *ptr = NULL;
        bufsize s;

        /*
         * Make sure that malloc has correct alignment of returned buffers.
         * The assumption is that uintptr_t will be as wide as the largest
         * required alignment of any type.
         */
        COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

        raw_malloc_validate_pools(ctx);

        /* Compute total size, excluding the header */
        if (ADD_OVERFLOW(pl_size, ftr_size, &s))
                goto out;

        /* BGET doesn't like 0 sized allocations */
        if (!s)
                s++;

        ptr = bget(0, hdr_size, s, &ctx->poolset);
out:
        raw_malloc_return_hook(ptr, pl_size, ctx);

        return ptr;
}

static void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
        raw_malloc_validate_pools(ctx);

        if (ptr)
                brel(ptr, &ctx->poolset, wipe);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
                        size_t pl_size, struct malloc_ctx *ctx)
{
        void *ptr = NULL;
        bufsize s;

        raw_malloc_validate_pools(ctx);

        /* Compute total size, excluding hdr_size */
        if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
                goto out;
        if (ADD_OVERFLOW(s, ftr_size, &s))
                goto out;

        /* BGET doesn't like 0 sized allocations */
        if (!s)
                s++;

        ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
        raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

        return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
                         size_t pl_size, struct malloc_ctx *ctx)
{
        void *p = NULL;
        bufsize s;

        /* Compute total size */
        if (ADD_OVERFLOW(pl_size, hdr_size, &s))
                goto out;
        if (ADD_OVERFLOW(s, ftr_size, &s))
                goto out;

        raw_malloc_validate_pools(ctx);

        /* BGET doesn't like 0 sized allocations */
        if (!s)
                s++;

        p = bgetr(ptr, 0, 0, s, &ctx->poolset);
out:
        raw_malloc_return_hook(p, pl_size, ctx);

        return p;
}
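
/*
 * Buffer layout produced by the raw_* functions above, as a sketch
 * (assuming no extra alignment is requested from BGET):
 *
 *      [ struct bhead | hdr_size bytes | payload | ftr_size bytes ]
 *                      ^
 *                      pointer returned by bget()/bgetz()/bgetr()
 *
 * The MDBG allocator below passes real header and footer sizes to wrap
 * every payload in marker data; the plain allocator passes 0 for both.
 */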
/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
        bufsize osize;          /* Old size of buffer */
        struct bhead *b;

        b = BH(((char *)buf) - sizeof(struct bhead));
        osize = -b->bsize;
#ifdef BECtl
        if (osize == 0) {
                /* Buffer acquired directly through acqfcn. */
                struct bdhead *bd;

                bd = BDH(((char *)buf) - sizeof(struct bdhead));
                osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
        } else
#endif
                osize -= sizeof(struct bhead);
        assert(osize > 0);
        return osize;
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
        const char *fname;
        uint16_t line;
        uint32_t pl_size;
        uint32_t magic;
#if defined(ARM64)
        uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC 0xadadadad
#define MDBG_FOOTER_MAGIC 0xecececec
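
/*
 * An MDBG allocation wraps the payload as follows (sketch):
 *
 *      [ struct mdbg_hdr | payload (pl_size) | pad | uint32_t footer ]
 *
 * mdbg_get_ftr_size() below rounds the payload up to a uint32_t boundary
 * so that the footer is naturally aligned. The header records where the
 * allocation was made (fname:line), and header and footer magics are
 * validated on free, realloc and mdbg_check() to catch overruns.
 */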
static size_t mdbg_get_ftr_size(size_t pl_size)
{
        size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

        return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
        uint32_t *footer;

        footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
                              mdbg_get_ftr_size(hdr->pl_size));
        footer--;
        return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
                            int lineno, size_t pl_size)
{
        uint32_t *footer;

        hdr->fname = fname;
        hdr->line = lineno;
        hdr->pl_size = pl_size;
        hdr->magic = MDBG_HEADER_MAGIC;

        footer = mdbg_get_footer(hdr);
        *footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
                             int lineno, size_t size)
{
        struct mdbg_hdr *hdr;
        uint32_t exceptions = malloc_lock(ctx);

        /*
         * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
         */
        COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

        hdr = raw_malloc(sizeof(struct mdbg_hdr),
                         mdbg_get_ftr_size(size), size, ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, size);
                hdr++;
        }

        malloc_unlock(ctx, exceptions);
        return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
        assert(hdr->magic == MDBG_HEADER_MAGIC);
        assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
        struct mdbg_hdr *hdr = ptr;

        if (hdr) {
                hdr--;
                assert_header(hdr);
                hdr->magic = 0;
                *mdbg_get_footer(hdr) = 0;
                raw_free(hdr, ctx, wipe);
        }
}

static void free_helper(void *ptr, bool wipe)
{
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        gen_mdbg_free(&malloc_ctx, ptr, wipe);
        malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
                             int lineno, size_t nmemb, size_t size)
{
        struct mdbg_hdr *hdr;
        uint32_t exceptions = malloc_lock(ctx);

        hdr = raw_calloc(sizeof(struct mdbg_hdr),
                         mdbg_get_ftr_size(nmemb * size), nmemb, size,
                         ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
                hdr++;
        }
        malloc_unlock(ctx, exceptions);
        return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
                                       const char *fname, int lineno,
                                       void *ptr, size_t size)
{
        struct mdbg_hdr *hdr = ptr;

        if (hdr) {
                hdr--;
                assert_header(hdr);
        }
        hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
                          mdbg_get_ftr_size(size), size, ctx);
        if (hdr) {
                mdbg_update_hdr(hdr, fname, lineno, size);
                hdr++;
        }
        return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
                              int lineno, void *ptr, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(ctx);

        p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
        malloc_unlock(ctx, exceptions);
        return p;
}

#define realloc_unlocked(ctx, ptr, size) \
        gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
        struct mdbg_hdr *hdr = raw_buf;

        assert(bget_buf_size(hdr) >= hdr->pl_size);
        *size = hdr->pl_size;
        return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
        struct bpool_iterator itr;
        void *b;
        uint32_t exceptions = malloc_lock(ctx);

        raw_malloc_validate_pools(ctx);

        BPOOL_FOREACH(ctx, &itr, &b) {
                struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

                assert_header(hdr);

                if (bufdump > 0) {
                        const char *fname = hdr->fname;

                        if (!fname)
                                fname = "unknown";

                        IMSG("buffer: %d bytes %s:%d\n",
                             hdr->pl_size, fname, hdr->line);
                }
        }

        malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
        return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
        return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
        return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void mdbg_check(int bufdump)
{
        gen_mdbg_check(&malloc_ctx, bufdump);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = raw_malloc(0, 0, size, &malloc_ctx);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

static void free_helper(void *ptr, bool wipe)
{
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        raw_free(ptr, &malloc_ctx, wipe);
        malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
                              size_t size)
{
        return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&malloc_ctx);

        p = realloc_unlocked(&malloc_ctx, ptr, size);
        malloc_unlock(&malloc_ctx, exceptions);
        return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
        *size = bget_buf_size(ptr);
        return ptr;
}

#endif /* ENABLE_MDBG */

void free(void *ptr)
{
        free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
        free_helper(ptr, true);
}
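
/*
 * Pool registration. gen_malloc_add_pool() rounds the supplied buffer
 * inwards to SizeQuant alignment and rejects buffers too small to hold
 * the pool bookkeeping plus BGET's two block headers. Usage sketch,
 * assuming a reserved heap area (the name ta_heap is hypothetical):
 *
 *      static uint8_t ta_heap[0x10000];
 *
 *      malloc_add_pool(ta_heap, sizeof(ta_heap));
 */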
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
        void *p;
        size_t l;
        uint32_t exceptions;
        uintptr_t start = (uintptr_t)buf;
        uintptr_t end = start + len;
        const size_t min_len = ROUNDUP(sizeof(struct malloc_pool), SizeQuant) +
                               sizeof(struct bhead) * 2;

        start = ROUNDUP(start, SizeQuant);
        end = ROUNDDOWN(end, SizeQuant);
        assert(start < end);

        if ((end - start) < min_len) {
                DMSG("Skipping too small pool");
                return;
        }

        exceptions = malloc_lock(ctx);

        tag_asan_free((void *)start, end - start);
        bpool((void *)start, end - start, &ctx->poolset);
        l = ctx->pool_len + 1;
        p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
        assert(p);
        ctx->pool = p;
        ctx->pool[ctx->pool_len].buf = (void *)start;
        ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
        ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
        ctx->pool_len = l;
        malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
                                                void *buf, size_t len)
{
        struct bpool_iterator itr;
        void *b;
        uint8_t *start_buf = buf;
        uint8_t *end_buf = start_buf + len;
        bool ret = false;
        uint32_t exceptions = malloc_lock(ctx);

        raw_malloc_validate_pools(ctx);

        /* Check for wrapping */
        if (start_buf > end_buf)
                goto out;

        BPOOL_FOREACH(ctx, &itr, &b) {
                uint8_t *start_b;
                uint8_t *end_b;
                size_t s;

                start_b = get_payload_start_size(b, &s);
                end_b = start_b + s;

                if (start_buf >= start_b && end_buf <= end_b) {
                        ret = true;
                        goto out;
                }
        }

out:
        malloc_unlock(ctx, exceptions);

        return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
                                            void *buf, size_t len)
{
        uintptr_t buf_start = (uintptr_t)buf;
        uintptr_t buf_end = buf_start + len;
        size_t n;
        bool ret = false;
        uint32_t exceptions = malloc_lock(ctx);

        raw_malloc_validate_pools(ctx);

        for (n = 0; n < ctx->pool_len; n++) {
                uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
                uintptr_t pool_end = pool_start + ctx->pool[n].len;

                if (buf_start > buf_end || pool_start > pool_end) {
                        ret = true;     /* Wrapping buffers, shouldn't happen */
                        goto out;
                }

                /* Overlap: buffer ends above pool start and starts below pool end */
                if (buf_end > pool_start && buf_start < pool_end) {
                        ret = true;
                        goto out;
                }
        }

out:
        malloc_unlock(ctx, exceptions);
        return ret;
}

void malloc_add_pool(void *buf, size_t len)
{
        gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
        return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
        return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = raw_malloc(0, 0, size, &nex_malloc_ctx);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void *nex_realloc(void *ptr, size_t size)
{
        void *p;
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
        malloc_unlock(&nex_malloc_ctx, exceptions);
        return p;
}

void nex_free(void *ptr)
{
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
        malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */
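
/*
 * Debug (MDBG) variants of the nexus heap entry points. These mirror the
 * mdbg_* wrappers above but operate on nex_malloc_ctx, so origin tracking
 * and header/footer validation also cover nexus allocations.
 */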
void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
        return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
        return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
        return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void nex_mdbg_check(int bufdump)
{
        gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
        uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

        gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
        malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
        gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
        return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
        return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
        gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
        gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif /* BufStats */

#endif /* CFG_VIRTUALIZATION */