// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif
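/*
 * Worked example of the size quantum above (illustrative note, based on
 * the rounding done in bget.c): with SizeQuant == 16 (LP64), each payload
 * is rounded up to the next multiple of 16, so a 20-byte request occupies
 * 32 bytes of payload space plus the struct bhead bookkeeping header that
 * bget places in front of every buffer.
 */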
#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdlib_ext.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>

static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
				     {&name.poolset.freelist,	\
				      &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif
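/*
 * DEFINE_CTX above creates the bget free list in its empty state: the
 * sentinel bfhead gets a zeroed header ({0, 0}) and both of its queue
 * links point back at the sentinel itself, which is how an empty circular
 * doubly-linked list looks. bpool() later links real pool memory into
 * this list.
 */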
static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	EPRINT_STACK();
#endif
}

#ifdef BufStats

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp) \
	for (bpool_foreach_iterator_init((ctx), (iterator)); \
	     bpool_foreach((ctx), (iterator), (bp));)
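/*
 * Illustrative sketch of the iterator above: counting live allocations in
 * a context. The BGET_MALLOC_EXAMPLES guard is hypothetical and never
 * defined by a real build; the caller is assumed to hold the context lock.
 */
#ifdef BGET_MALLOC_EXAMPLES
static size_t __maybe_unused example_count_allocated(struct malloc_ctx *ctx)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	size_t count = 0;

	/* BPOOL_FOREACH skips free buffers, so this counts allocations */
	BPOOL_FOREACH(ctx, &itr, &b)
		count++;

	return count;
}
#endif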
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

static void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset, wipe);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
			size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
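/*
 * Note on the raw_* interface above: hdr_size and ftr_size let a caller
 * reserve guard areas around the payload within a single bget buffer:
 *
 *	| hdr_size | payload (pl_size) | ftr_size |
 *	^-- pointer returned by bget()/bgetz()/bgetr()
 *
 * The plain malloc()/calloc()/realloc() wrappers pass 0 for both sizes;
 * the mdbg wrappers below use them for a guard header and footer.
 */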
#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}
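/*
 * Illustrative sketch: because gen_mdbg_free() clears the header magic
 * before releasing the buffer, a double free trips assert_header()
 * instead of silently corrupting the pool. The BGET_MALLOC_EXAMPLES
 * guard is hypothetical and never defined by a real build.
 */
#ifdef BGET_MALLOC_EXAMPLES
static void __maybe_unused example_double_free_trap(void)
{
	void *p = mdbg_malloc(__FILE__, __LINE__, 16);

	free(p);	/* Clears MDBG_HEADER_MAGIC and releases the buffer */
	/* A second free(p) here would fail the assert in assert_header() */
}
#endif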
static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#else

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
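/*
 * Illustrative sketch of the public API: how a platform might seed the
 * allocator with a pool and use it. example_heap, its size, and the
 * BGET_MALLOC_EXAMPLES guard are hypothetical and never part of a real
 * build.
 */
#ifdef BGET_MALLOC_EXAMPLES
static uint8_t example_heap[4096] __aligned(16);

static void __maybe_unused example_heap_usage(void)
{
	void *p = NULL;

	/* Hand the static buffer to the allocator as a pool */
	malloc_add_pool(example_heap, sizeof(example_heap));

	p = malloc(32);		/* Served from example_heap */
	if (p)
		free(p);
}
#endif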
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
				(~(SizeQuant - 1))) +
			       sizeof(struct bhead) * 2;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		/* [buf_start, buf_end) intersects [pool_start, pool_end) */
		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}
void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif