// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2022, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg 20000		/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size. This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump 1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid 1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData 1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats 1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe 1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit 1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl 1			/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif
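
/*
 * None of the options above take effect unless BGET_ENABLE_OPTION is
 * defined; the configuration actually used by this file comes from the
 * MEM_DEBUG and CFG_WITH_STATS blocks below.  Roughly (based on how bget()
 * rounds requests in bget.c, not shown here): with SizeQuant == 16 a
 * 20-byte request is padded up to a 32-byte payload before the bhead
 * overhead is added.
 */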

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif
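
/*
 * DEFINE_CTX() creates an empty free list: the list head's flink/blink
 * pointers refer back to the head itself, which is exactly what
 * raw_malloc_init_ctx() sets up at run time for dynamically created
 * contexts.
 */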

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
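
/*
 * Note on bget_buf_size(): while a buffer is allocated, bget keeps the
 * negated block size in bhead.bsize, so -bsize minus the bhead overhead is
 * the usable payload size.  A stored size of zero marks a buffer that was
 * acquired directly through acqfcn (only possible with BECtl); its size is
 * then derived from the bdhead prefix instead.
 */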

static void *maybe_tag_buf(void *buf, size_t __maybe_unused requested_size)
{
	if (!buf)
		return NULL;

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_access(buf, (uint8_t *)buf + requested_size);
#endif
	return buf;
}

static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, MAX(SizeQuant, requested_size));
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)		      \
	for (bpool_foreach_iterator_init((ctx), (iterator)); \
	     bpool_foreach((ctx), (iterator), (bp));)

void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding the header */
	if (ADD_OVERFLOW(pl_size, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(alignment, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	/*
	 * Note that we're feeding SizeQ as alignment, this is the smallest
	 * alignment that bget() can use.
	 */
	return raw_memalign(hdr_size, ftr_size, SizeQ, pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(0, hdr_size, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(maybe_untag_buf(ptr), 0, 0, s, &ctx->poolset);
out:
	return raw_malloc_return_hook(p, pl_size, ctx);
}

#ifdef ENABLE_MDBG

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	COMPILE_TIME_ASSERT((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr, bool wipe)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx, wipe);
	}
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size) \
	gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}

/*
 * Since malloc debug is enabled, malloc() and friends are redirected by
 * macros to mdbg_malloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external libraries.
 */
#undef malloc
void *malloc(size_t size)
{
	return mdbg_malloc(__FILE__, __LINE__, size);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mdbg_calloc(__FILE__, __LINE__, nmemb, size);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mdbg_realloc(__FILE__, __LINE__, ptr, size);
}

#else /* ENABLE_MDBG */

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void free_helper(void *ptr, bool wipe)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx, wipe);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

void free(void *ptr)
{
	free_helper(ptr, false);
}

void free_wipe(void *ptr)
{
	free_helper(ptr, true);
}
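
/*
 * Illustrative use of the pool registration below ("heap" is a
 * hypothetical buffer, not something defined in this file):
 *
 *	static uint8_t heap[64 * 1024] __aligned(SizeQuant);
 *
 *	malloc_add_pool(heap, sizeof(heap));
 *
 * gen_malloc_add_pool() rounds the range to SizeQuant boundaries, rejects
 * pools that are too small and hands the remaining range to bpool().
 */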

static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t) buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		if (buf_end > pool_start || buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	gen_malloc_add_pool(ctx, buf, len);
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct malloc_stats *stats)
{
	gen_malloc_get_stats(ctx, stats);
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}
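
/*
 * With CFG_VIRTUALIZATION the nex_* entry points below mirror the standard
 * malloc()/free() API but operate on nex_malloc_ctx, i.e. on the separate
 * nexus heap declared with __nex_data earlier in this file.
 */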

#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr, false /* !wipe */);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif /* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif