1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2014, STMicroelectronics International N.V. 4 */ 5 6 #define PROTOTYPES 7 8 /* 9 * BGET CONFIGURATION 10 * ================== 11 */ 12 /* #define BGET_ENABLE_ALL_OPTIONS */ 13 #ifdef BGET_ENABLE_OPTION 14 #define TestProg 20000 /* Generate built-in test program 15 if defined. The value specifies 16 how many buffer allocation attempts 17 the test program should make. */ 18 #endif 19 20 21 #ifdef __LP64__ 22 #define SizeQuant 16 23 #endif 24 #ifdef __ILP32__ 25 #define SizeQuant 8 26 #endif 27 /* Buffer allocation size quantum: 28 all buffers allocated are a 29 multiple of this size. This 30 MUST be a power of two. */ 31 32 #ifdef BGET_ENABLE_OPTION 33 #define BufDump 1 /* Define this symbol to enable the 34 bpoold() function which dumps the 35 buffers in a buffer pool. */ 36 37 #define BufValid 1 /* Define this symbol to enable the 38 bpoolv() function for validating 39 a buffer pool. */ 40 41 #define DumpData 1 /* Define this symbol to enable the 42 bufdump() function which allows 43 dumping the contents of an allocated 44 or free buffer. */ 45 46 #define BufStats 1 /* Define this symbol to enable the 47 bstats() function which calculates 48 the total free space in the buffer 49 pool, the largest available 50 buffer, and the total space 51 currently allocated. */ 52 53 #define FreeWipe 1 /* Wipe free buffers to a guaranteed 54 pattern of garbage to trip up 55 miscreants who attempt to use 56 pointers into released buffers. */ 57 58 #define BestFit 1 /* Use a best fit algorithm when 59 searching for space for an 60 allocation request. This uses 61 memory more efficiently, but 62 allocation will be much slower. */ 63 64 #define BECtl 1 /* Define this symbol to enable the 65 bectl() function for automatic 66 pool space control. */ 67 #endif 68 69 #ifdef MEM_DEBUG 70 #undef NDEBUG 71 #define DumpData 1 72 #define BufValid 1 73 #define FreeWipe 1 74 #endif 75 76 #ifdef CFG_WITH_STATS 77 #define BufStats 1 78 #endif 79 80 #include <compiler.h> 81 #include <malloc.h> 82 #include <stdbool.h> 83 #include <stdint.h> 84 #include <stdlib.h> 85 #include <string.h> 86 #include <trace.h> 87 #include <util.h> 88 89 #if defined(__KERNEL__) 90 /* Compiling for TEE Core */ 91 #include <kernel/asan.h> 92 #include <kernel/thread.h> 93 #include <kernel/spinlock.h> 94 95 static void tag_asan_free(void *buf, size_t len) 96 { 97 asan_tag_heap_free(buf, (uint8_t *)buf + len); 98 } 99 100 static void tag_asan_alloced(void *buf, size_t len) 101 { 102 asan_tag_access(buf, (uint8_t *)buf + len); 103 } 104 105 static void *memset_unchecked(void *s, int c, size_t n) 106 { 107 return asan_memset_unchecked(s, c, n); 108 } 109 110 #else /*__KERNEL__*/ 111 /* Compiling for TA */ 112 113 static void tag_asan_free(void *buf __unused, size_t len __unused) 114 { 115 } 116 117 static void tag_asan_alloced(void *buf __unused, size_t len __unused) 118 { 119 } 120 121 static void *memset_unchecked(void *s, int c, size_t n) 122 { 123 return memset(s, c, n); 124 } 125 126 #endif /*__KERNEL__*/ 127 128 #include "bget.c" /* this is ugly, but this is bget */ 129 130 struct malloc_pool { 131 void *buf; 132 size_t len; 133 }; 134 135 struct malloc_ctx { 136 struct bpoolset poolset; 137 struct malloc_pool *pool; 138 size_t pool_len; 139 #ifdef BufStats 140 struct malloc_stats mstats; 141 #endif 142 #ifdef __KERNEL__ 143 unsigned int spinlock; 144 #endif 145 }; 146 147 #ifdef __KERNEL__ 148 149 static uint32_t malloc_lock(struct malloc_ctx *ctx) 150 { 151 return cpu_spin_lock_xsave(&ctx->spinlock); 152 } 153 154 static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions) 155 { 156 cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions); 157 } 158 159 #else /* __KERNEL__ */ 160 161 static uint32_t malloc_lock(struct malloc_ctx *ctx __unused) 162 { 163 return 0; 164 } 165 166 static void malloc_unlock(struct malloc_ctx *ctx __unused, 167 uint32_t exceptions __unused) 168 { 169 } 170 171 #endif /* __KERNEL__ */ 172 173 #define DEFINE_CTX(name) struct malloc_ctx name = \ 174 { .poolset = { .freelist = { {0, 0}, \ 175 {&name.poolset.freelist, \ 176 &name.poolset.freelist}}}} 177 178 static DEFINE_CTX(malloc_ctx); 179 180 #ifdef CFG_VIRTUALIZATION 181 static __nex_data DEFINE_CTX(nex_malloc_ctx); 182 #endif 183 184 static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused) 185 { 186 #if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM) 187 EMSG("Memory allocation failed: size %zu context %p", req_size, ctx); 188 EPRINT_STACK(); 189 #endif 190 } 191 192 #ifdef BufStats 193 194 static void raw_malloc_return_hook(void *p, size_t requested_size, 195 struct malloc_ctx *ctx) 196 { 197 if (ctx->poolset.totalloc > ctx->mstats.max_allocated) 198 ctx->mstats.max_allocated = ctx->poolset.totalloc; 199 200 if (!p) { 201 ctx->mstats.num_alloc_fail++; 202 print_oom(requested_size, ctx); 203 if (requested_size > ctx->mstats.biggest_alloc_fail) { 204 ctx->mstats.biggest_alloc_fail = requested_size; 205 ctx->mstats.biggest_alloc_fail_used = 206 ctx->poolset.totalloc; 207 } 208 } 209 } 210 211 static void gen_malloc_reset_stats(struct malloc_ctx *ctx) 212 { 213 uint32_t exceptions = malloc_lock(ctx); 214 215 ctx->mstats.max_allocated = 0; 216 ctx->mstats.num_alloc_fail = 0; 217 ctx->mstats.biggest_alloc_fail = 0; 218 ctx->mstats.biggest_alloc_fail_used = 0; 219 malloc_unlock(ctx, exceptions); 220 } 221 222 void malloc_reset_stats(void) 223 { 224 gen_malloc_reset_stats(&malloc_ctx); 225 } 226 227 static void gen_malloc_get_stats(struct malloc_ctx *ctx, 228 struct malloc_stats *stats) 229 { 230 uint32_t exceptions = malloc_lock(ctx); 231 232 memcpy(stats, &ctx->mstats, sizeof(*stats)); 233 stats->allocated = ctx->poolset.totalloc; 234 malloc_unlock(ctx, exceptions); 235 } 236 237 void malloc_get_stats(struct malloc_stats *stats) 238 { 239 gen_malloc_get_stats(&malloc_ctx, stats); 240 } 241 242 #else /* BufStats */ 243 244 static void raw_malloc_return_hook(void *p, size_t requested_size, 245 struct malloc_ctx *ctx ) 246 { 247 if (!p) 248 print_oom(requested_size, ctx); 249 } 250 251 #endif /* BufStats */ 252 253 #ifdef BufValid 254 static void raw_malloc_validate_pools(struct malloc_ctx *ctx) 255 { 256 size_t n; 257 258 for (n = 0; n < ctx->pool_len; n++) 259 bpoolv(ctx->pool[n].buf); 260 } 261 #else 262 static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused) 263 { 264 } 265 #endif 266 267 struct bpool_iterator { 268 struct bfhead *next_buf; 269 size_t pool_idx; 270 }; 271 272 static void bpool_foreach_iterator_init(struct malloc_ctx *ctx, 273 struct bpool_iterator *iterator) 274 { 275 iterator->pool_idx = 0; 276 iterator->next_buf = BFH(ctx->pool[0].buf); 277 } 278 279 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf, 280 size_t *len, bool *isfree) 281 { 282 struct bfhead *b = iterator->next_buf; 283 bufsize bs = b->bh.bsize; 284 285 if (bs == ESent) 286 return false; 287 288 if (bs < 0) { 289 /* Allocated buffer */ 290 bs = -bs; 291 292 *isfree = false; 293 } else { 294 /* Free Buffer */ 295 *isfree = true; 296 297 /* Assert that the free list links are intact */ 298 assert(b->ql.blink->ql.flink == b); 299 assert(b->ql.flink->ql.blink == b); 300 } 301 302 *buf = (uint8_t *)b + sizeof(struct bhead); 303 *len = bs - sizeof(struct bhead); 304 305 iterator->next_buf = BFH((uint8_t *)b + bs); 306 return true; 307 } 308 309 static bool bpool_foreach(struct malloc_ctx *ctx, 310 struct bpool_iterator *iterator, void **buf) 311 { 312 while (true) { 313 size_t len; 314 bool isfree; 315 316 if (bpool_foreach_pool(iterator, buf, &len, &isfree)) { 317 if (isfree) 318 continue; 319 return true; 320 } 321 322 if ((iterator->pool_idx + 1) >= ctx->pool_len) 323 return false; 324 325 iterator->pool_idx++; 326 iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf); 327 } 328 } 329 330 /* Convenience macro for looping over all allocated buffers */ 331 #define BPOOL_FOREACH(ctx, iterator, bp) \ 332 for (bpool_foreach_iterator_init((ctx),(iterator)); \ 333 bpool_foreach((ctx),(iterator), (bp));) 334 335 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size, 336 struct malloc_ctx *ctx) 337 { 338 void *ptr = NULL; 339 bufsize s; 340 341 /* 342 * Make sure that malloc has correct alignment of returned buffers. 343 * The assumption is that uintptr_t will be as wide as the largest 344 * required alignment of any type. 345 */ 346 COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t)); 347 348 raw_malloc_validate_pools(ctx); 349 350 /* Compute total size */ 351 if (ADD_OVERFLOW(pl_size, hdr_size, &s)) 352 goto out; 353 if (ADD_OVERFLOW(s, ftr_size, &s)) 354 goto out; 355 356 /* BGET doesn't like 0 sized allocations */ 357 if (!s) 358 s++; 359 360 ptr = bget(s, &ctx->poolset); 361 out: 362 raw_malloc_return_hook(ptr, pl_size, ctx); 363 364 return ptr; 365 } 366 367 static void raw_free(void *ptr, struct malloc_ctx *ctx) 368 { 369 raw_malloc_validate_pools(ctx); 370 371 if (ptr) 372 brel(ptr, &ctx->poolset); 373 } 374 375 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb, 376 size_t pl_size, struct malloc_ctx *ctx) 377 { 378 void *ptr = NULL; 379 bufsize s; 380 381 raw_malloc_validate_pools(ctx); 382 383 /* Compute total size */ 384 if (MUL_OVERFLOW(pl_nmemb, pl_size, &s)) 385 goto out; 386 if (ADD_OVERFLOW(s, hdr_size, &s)) 387 goto out; 388 if (ADD_OVERFLOW(s, ftr_size, &s)) 389 goto out; 390 391 /* BGET doesn't like 0 sized allocations */ 392 if (!s) 393 s++; 394 395 ptr = bgetz(s, &ctx->poolset); 396 out: 397 raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx); 398 399 return ptr; 400 } 401 402 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size, 403 size_t pl_size, struct malloc_ctx *ctx) 404 { 405 void *p = NULL; 406 bufsize s; 407 408 /* Compute total size */ 409 if (ADD_OVERFLOW(pl_size, hdr_size, &s)) 410 goto out; 411 if (ADD_OVERFLOW(s, ftr_size, &s)) 412 goto out; 413 414 raw_malloc_validate_pools(ctx); 415 416 /* BGET doesn't like 0 sized allocations */ 417 if (!s) 418 s++; 419 420 p = bgetr(ptr, s, &ctx->poolset); 421 out: 422 raw_malloc_return_hook(p, pl_size, ctx); 423 424 return p; 425 } 426 427 /* Most of the stuff in this function is copied from bgetr() in bget.c */ 428 static __maybe_unused bufsize bget_buf_size(void *buf) 429 { 430 bufsize osize; /* Old size of buffer */ 431 struct bhead *b; 432 433 b = BH(((char *)buf) - sizeof(struct bhead)); 434 osize = -b->bsize; 435 #ifdef BECtl 436 if (osize == 0) { 437 /* Buffer acquired directly through acqfcn. */ 438 struct bdhead *bd; 439 440 bd = BDH(((char *)buf) - sizeof(struct bdhead)); 441 osize = bd->tsize - sizeof(struct bdhead); 442 } else 443 #endif 444 osize -= sizeof(struct bhead); 445 assert(osize > 0); 446 return osize; 447 } 448 449 #ifdef ENABLE_MDBG 450 451 struct mdbg_hdr { 452 const char *fname; 453 uint16_t line; 454 uint32_t pl_size; 455 uint32_t magic; 456 #if defined(ARM64) 457 uint64_t pad; 458 #endif 459 }; 460 461 #define MDBG_HEADER_MAGIC 0xadadadad 462 #define MDBG_FOOTER_MAGIC 0xecececec 463 464 static size_t mdbg_get_ftr_size(size_t pl_size) 465 { 466 size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size; 467 468 return ftr_pad + sizeof(uint32_t); 469 } 470 471 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr) 472 { 473 uint32_t *footer; 474 475 footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size + 476 mdbg_get_ftr_size(hdr->pl_size)); 477 footer--; 478 return footer; 479 } 480 481 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname, 482 int lineno, size_t pl_size) 483 { 484 uint32_t *footer; 485 486 hdr->fname = fname; 487 hdr->line = lineno; 488 hdr->pl_size = pl_size; 489 hdr->magic = MDBG_HEADER_MAGIC; 490 491 footer = mdbg_get_footer(hdr); 492 *footer = MDBG_FOOTER_MAGIC; 493 } 494 495 static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname, 496 int lineno, size_t size) 497 { 498 struct mdbg_hdr *hdr; 499 uint32_t exceptions = malloc_lock(ctx); 500 501 /* 502 * Check struct mdbg_hdr doesn't get bad alignment. 503 * This is required by C standard: the buffer returned from 504 * malloc() should be aligned with a fundamental alignment. 505 * For ARM32, the required alignment is 8. For ARM64, it is 16. 506 */ 507 COMPILE_TIME_ASSERT( 508 (sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0); 509 510 hdr = raw_malloc(sizeof(struct mdbg_hdr), 511 mdbg_get_ftr_size(size), size, ctx); 512 if (hdr) { 513 mdbg_update_hdr(hdr, fname, lineno, size); 514 hdr++; 515 } 516 517 malloc_unlock(ctx, exceptions); 518 return hdr; 519 } 520 521 static void assert_header(struct mdbg_hdr *hdr __maybe_unused) 522 { 523 assert(hdr->magic == MDBG_HEADER_MAGIC); 524 assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC); 525 } 526 527 static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr) 528 { 529 struct mdbg_hdr *hdr = ptr; 530 531 if (hdr) { 532 hdr--; 533 assert_header(hdr); 534 hdr->magic = 0; 535 *mdbg_get_footer(hdr) = 0; 536 raw_free(hdr, ctx); 537 } 538 } 539 540 void free(void *ptr) 541 { 542 uint32_t exceptions = malloc_lock(&malloc_ctx); 543 544 gen_mdbg_free(&malloc_ctx, ptr); 545 malloc_unlock(&malloc_ctx, exceptions); 546 } 547 548 static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname, int lineno, 549 size_t nmemb, size_t size) 550 { 551 struct mdbg_hdr *hdr; 552 uint32_t exceptions = malloc_lock(ctx); 553 554 hdr = raw_calloc(sizeof(struct mdbg_hdr), 555 mdbg_get_ftr_size(nmemb * size), nmemb, size, 556 ctx); 557 if (hdr) { 558 mdbg_update_hdr(hdr, fname, lineno, nmemb * size); 559 hdr++; 560 } 561 malloc_unlock(ctx, exceptions); 562 return hdr; 563 } 564 565 static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx, const char *fname, 566 int lineno, void *ptr, size_t size) 567 { 568 struct mdbg_hdr *hdr = ptr; 569 570 if (hdr) { 571 hdr--; 572 assert_header(hdr); 573 } 574 hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr), 575 mdbg_get_ftr_size(size), size, ctx); 576 if (hdr) { 577 mdbg_update_hdr(hdr, fname, lineno, size); 578 hdr++; 579 } 580 return hdr; 581 } 582 583 static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname, 584 int lineno, void *ptr, size_t size) 585 { 586 void *p; 587 uint32_t exceptions = malloc_lock(ctx); 588 589 p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size); 590 malloc_unlock(ctx, exceptions); 591 return p; 592 } 593 594 #define realloc_unlocked(ctx, ptr, size) \ 595 gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size)) 596 597 static void *get_payload_start_size(void *raw_buf, size_t *size) 598 { 599 struct mdbg_hdr *hdr = raw_buf; 600 601 assert(bget_buf_size(hdr) >= hdr->pl_size); 602 *size = hdr->pl_size; 603 return hdr + 1; 604 } 605 606 static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump) 607 { 608 struct bpool_iterator itr; 609 void *b; 610 uint32_t exceptions = malloc_lock(ctx); 611 612 raw_malloc_validate_pools(ctx); 613 614 BPOOL_FOREACH(ctx, &itr, &b) { 615 struct mdbg_hdr *hdr = (struct mdbg_hdr *)b; 616 617 assert_header(hdr); 618 619 if (bufdump > 0) { 620 const char *fname = hdr->fname; 621 622 if (!fname) 623 fname = "unknown"; 624 625 IMSG("buffer: %d bytes %s:%d\n", 626 hdr->pl_size, fname, hdr->line); 627 } 628 } 629 630 malloc_unlock(ctx, exceptions); 631 } 632 633 void *mdbg_malloc(const char *fname, int lineno, size_t size) 634 { 635 return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size); 636 } 637 638 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 639 { 640 return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size); 641 } 642 643 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 644 { 645 return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size); 646 } 647 648 void mdbg_check(int bufdump) 649 { 650 gen_mdbg_check(&malloc_ctx, bufdump); 651 } 652 #else 653 654 void *malloc(size_t size) 655 { 656 void *p; 657 uint32_t exceptions = malloc_lock(&malloc_ctx); 658 659 p = raw_malloc(0, 0, size, &malloc_ctx); 660 malloc_unlock(&malloc_ctx, exceptions); 661 return p; 662 } 663 664 void free(void *ptr) 665 { 666 uint32_t exceptions = malloc_lock(&malloc_ctx); 667 668 raw_free(ptr, &malloc_ctx); 669 malloc_unlock(&malloc_ctx, exceptions); 670 } 671 672 void *calloc(size_t nmemb, size_t size) 673 { 674 void *p; 675 uint32_t exceptions = malloc_lock(&malloc_ctx); 676 677 p = raw_calloc(0, 0, nmemb, size, &malloc_ctx); 678 malloc_unlock(&malloc_ctx, exceptions); 679 return p; 680 } 681 682 static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr, 683 size_t size) 684 { 685 return raw_realloc(ptr, 0, 0, size, ctx); 686 } 687 688 void *realloc(void *ptr, size_t size) 689 { 690 void *p; 691 uint32_t exceptions = malloc_lock(&malloc_ctx); 692 693 p = realloc_unlocked(&malloc_ctx, ptr, size); 694 malloc_unlock(&malloc_ctx, exceptions); 695 return p; 696 } 697 698 static void *get_payload_start_size(void *ptr, size_t *size) 699 { 700 *size = bget_buf_size(ptr); 701 return ptr; 702 } 703 704 #endif 705 706 static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len) 707 { 708 void *p; 709 size_t l; 710 uint32_t exceptions; 711 uintptr_t start = (uintptr_t)buf; 712 uintptr_t end = start + len; 713 const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) & 714 (~(SizeQuant - 1))) + 715 sizeof(struct bhead) * 2; 716 717 718 start = ROUNDUP(start, SizeQuant); 719 end = ROUNDDOWN(end, SizeQuant); 720 assert(start < end); 721 722 if ((end - start) < min_len) { 723 DMSG("Skipping too small pool"); 724 return; 725 } 726 727 exceptions = malloc_lock(ctx); 728 729 tag_asan_free((void *)start, end - start); 730 bpool((void *)start, end - start, &ctx->poolset); 731 l = ctx->pool_len + 1; 732 p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l); 733 assert(p); 734 ctx->pool = p; 735 ctx->pool[ctx->pool_len].buf = (void *)start; 736 ctx->pool[ctx->pool_len].len = end - start; 737 #ifdef BufStats 738 ctx->mstats.size += ctx->pool[ctx->pool_len].len; 739 #endif 740 ctx->pool_len = l; 741 malloc_unlock(ctx, exceptions); 742 } 743 744 static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx, 745 void *buf, size_t len) 746 { 747 struct bpool_iterator itr; 748 void *b; 749 uint8_t *start_buf = buf; 750 uint8_t *end_buf = start_buf + len; 751 bool ret = false; 752 uint32_t exceptions = malloc_lock(ctx); 753 754 raw_malloc_validate_pools(ctx); 755 756 /* Check for wrapping */ 757 if (start_buf > end_buf) 758 goto out; 759 760 BPOOL_FOREACH(ctx, &itr, &b) { 761 uint8_t *start_b; 762 uint8_t *end_b; 763 size_t s; 764 765 start_b = get_payload_start_size(b, &s); 766 end_b = start_b + s; 767 768 if (start_buf >= start_b && end_buf <= end_b) { 769 ret = true; 770 goto out; 771 } 772 } 773 774 out: 775 malloc_unlock(ctx, exceptions); 776 777 return ret; 778 } 779 780 static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx, 781 void *buf, size_t len) 782 { 783 uintptr_t buf_start = (uintptr_t) buf; 784 uintptr_t buf_end = buf_start + len; 785 size_t n; 786 bool ret = false; 787 uint32_t exceptions = malloc_lock(ctx); 788 789 raw_malloc_validate_pools(ctx); 790 791 for (n = 0; n < ctx->pool_len; n++) { 792 uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf; 793 uintptr_t pool_end = pool_start + ctx->pool[n].len; 794 795 if (buf_start > buf_end || pool_start > pool_end) { 796 ret = true; /* Wrapping buffers, shouldn't happen */ 797 goto out; 798 } 799 800 if (buf_end > pool_start || buf_start < pool_end) { 801 ret = true; 802 goto out; 803 } 804 } 805 806 out: 807 malloc_unlock(ctx, exceptions); 808 return ret; 809 } 810 811 void malloc_add_pool(void *buf, size_t len) 812 { 813 gen_malloc_add_pool(&malloc_ctx, buf, len); 814 } 815 816 bool malloc_buffer_is_within_alloced(void *buf, size_t len) 817 { 818 return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len); 819 } 820 821 bool malloc_buffer_overlaps_heap(void *buf, size_t len) 822 { 823 return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len); 824 } 825 826 #ifdef CFG_VIRTUALIZATION 827 828 #ifndef ENABLE_MDBG 829 830 void *nex_malloc(size_t size) 831 { 832 void *p; 833 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 834 835 p = raw_malloc(0, 0, size, &nex_malloc_ctx); 836 malloc_unlock(&nex_malloc_ctx, exceptions); 837 return p; 838 } 839 840 void *nex_calloc(size_t nmemb, size_t size) 841 { 842 void *p; 843 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 844 845 p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx); 846 malloc_unlock(&nex_malloc_ctx, exceptions); 847 return p; 848 } 849 850 void *nex_realloc(void *ptr, size_t size) 851 { 852 void *p; 853 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 854 855 p = realloc_unlocked(&nex_malloc_ctx, ptr, size); 856 malloc_unlock(&nex_malloc_ctx, exceptions); 857 return p; 858 } 859 860 void nex_free(void *ptr) 861 { 862 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 863 864 raw_free(ptr, &nex_malloc_ctx); 865 malloc_unlock(&nex_malloc_ctx, exceptions); 866 } 867 868 #else /* ENABLE_MDBG */ 869 870 void *nex_mdbg_malloc(const char *fname, int lineno, size_t size) 871 { 872 return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size); 873 } 874 875 void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size) 876 { 877 return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size); 878 } 879 880 void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size) 881 { 882 return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size); 883 } 884 885 void nex_mdbg_check(int bufdump) 886 { 887 gen_mdbg_check(&nex_malloc_ctx, bufdump); 888 } 889 890 void nex_free(void *ptr) 891 { 892 uint32_t exceptions = malloc_lock(&nex_malloc_ctx); 893 894 gen_mdbg_free(&nex_malloc_ctx, ptr); 895 malloc_unlock(&nex_malloc_ctx, exceptions); 896 } 897 898 #endif /* ENABLE_MDBG */ 899 900 void nex_malloc_add_pool(void *buf, size_t len) 901 { 902 gen_malloc_add_pool(&nex_malloc_ctx, buf, len); 903 } 904 905 bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len) 906 { 907 return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len); 908 } 909 910 bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len) 911 { 912 return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len); 913 } 914 915 #ifdef BufStats 916 917 void nex_malloc_reset_stats(void) 918 { 919 gen_malloc_reset_stats(&nex_malloc_ctx); 920 } 921 922 void nex_malloc_get_stats(struct malloc_stats *stats) 923 { 924 gen_malloc_get_stats(&nex_malloc_ctx, stats); 925 } 926 927 #endif 928 929 #endif 930