// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2025, Linaro Limited.
 */

#define PROTOTYPES

/*
 * BGET CONFIGURATION
 * ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg 20000		/* Generate built-in test program
				   if defined. The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant 16
#endif
#ifdef __ILP32__
#define SizeQuant 8
#endif
/* Buffer allocation size quantum:
   all buffers allocated are a
   multiple of this size. This
   MUST be a power of two. */
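/* Note: the SizeQuant values above are presumably chosen to match the
   strictest allocation alignment expected on each ABI (16 bytes on LP64,
   8 bytes on ILP32). */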

#ifdef BGET_ENABLE_OPTION
#define BufDump 1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid 1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData 1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats 1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe 1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit 1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request. This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl 1			/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData 1
#define BufValid 1
#define FreeWipe 1
#endif

#ifdef CFG_WITH_STATS
#define BufStats 1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <pta_stats.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct pta_stats_alloc mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif /* __KERNEL__ */

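/*
 * Define a malloc context with an initially empty pool list and a free
 * list whose forward and backward links point back at itself, i.e. the
 * empty circular list bget expects.
 */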
#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
				     {&name.poolset.freelist,	\
				      &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_NS_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;		/* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

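/*
 * When memory tagging is enabled, assign fresh random tags to a newly
 * allocated buffer (header + payload); with ASan in the core, mark the
 * same range as accessible instead. Returns the (possibly retagged)
 * pointer, or NULL if @buf is NULL.
 */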
static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		/*
		 * MEMTAG needs actual allocated size (>= SizeQuant),
		 * unlike ASan which tags only requested bytes. For
		 * malloc(0), bget allocates SizeQuant, so we pass
		 * MAX(requested_size, SizeQuant) to ensure correct tagging.
		 */
		requested_size = MAX(requested_size, SizeQuant);

		sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz + hdr_size);
		return memtag_set_random_tags(buf, sz + hdr_size);
	}

#if defined(__KERNEL__)
	asan_tag_access(buf, buf + hdr_size + requested_size);
#endif
	return buf;
}

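/*
 * Counterpart of maybe_tag_buf(): clear the tags of a buffer that is about
 * to be released (or poison it for ASan) so that stale pointers into it
 * are caught.
 */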
static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}

static void *strip_tag(void *buf)
{
	if (MEMTAG_IS_ENABLED)
		return memtag_strip_tag(buf);
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, hdr_size, requested_size);
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct pta_stats_alloc *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_get_stats(ctx, stats);
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, hdr_size, requested_size);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
			       size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp) \
	for (bpool_foreach_iterator_init((ctx), (iterator)); \
	     bpool_foreach((ctx), (iterator), (bp));)

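/*
 * Common allocation helper: allocate (or, when @ptr is non-NULL,
 * reallocate) a buffer large enough for @pl_nmemb * @pl_size payload bytes
 * plus @ftr_size trailing bytes, with @hdr_size bytes reserved for a
 * header. @alignment must be a power of two. With MAF_ZERO_INIT the new
 * memory is zero-initialized. On reallocation the old contents are copied
 * and the old buffer is released.
 */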
void *raw_malloc_flags(uint32_t flags, void *ptr, size_t hdr_size,
		       size_t ftr_size, size_t alignment, size_t pl_nmemb,
		       size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s = 0;

	raw_malloc_validate_pools(ctx);

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	if ((flags & MAF_ZERO_INIT) && !ptr)
		p = bgetz(alignment, hdr_size, s, &ctx->poolset);
	else
		p = bget(alignment, hdr_size, s, &ctx->poolset);

	if (p && ptr) {
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);
		bufsize new_sz = s + hdr_size;

		if (old_sz < new_sz) {
			memcpy_unchecked(p, old_ptr, old_sz);
			if (flags & MAF_ZERO_INIT)
				memset_unchecked((uint8_t *)p + old_sz, 0,
						 new_sz - old_sz);
		} else {
			memcpy_unchecked(p, old_ptr, new_sz);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	return raw_malloc_return_hook(p, hdr_size, pl_nmemb * pl_size, ctx);
}

void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, alignment,
				1, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, 1, 1,
				pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_ZERO_INIT, NULL, hdr_size, ftr_size, 1,
				pl_nmemb, pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, ptr, hdr_size, ftr_size, 1, 1,
				pl_size, ctx);
}

struct mdbg_hdr {
	const char *fname;
	uint16_t line;
#ifdef __LP64__
	uint64_t pad;
#endif
	uint32_t pl_size;
	uint32_t magic;
};

#define MDBG_HEADER_MAGIC 0xadadadad
#define MDBG_FOOTER_MAGIC 0xecececec
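/*
 * When malloc debugging is enabled each allocation is laid out as
 * [struct mdbg_hdr][payload][padding to a 4-byte boundary][footer magic],
 * where the header records the allocation site and size and the 32-bit
 * footer magic catches writes past the end of the payload.
 */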

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return strip_tag(footer);
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
			    int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

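/*
 * Core of malloc()/realloc()/calloc()/memalign(): when malloc debugging is
 * enabled, reserve room for the mdbg header and footer around the payload
 * and fill them in; the actual allocation is done by raw_malloc_flags().
 * Must be called with the context lock held.
 */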
static void *mem_alloc_unlocked(uint32_t flags, void *ptr, size_t alignment,
				size_t nmemb, size_t size, const char *fname,
				int lineno, struct malloc_ctx *ctx)
{
	struct mdbg_hdr *hdr = NULL;
	size_t ftr_size = 0;
	size_t hdr_size = 0;

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	static_assert((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	if (IS_ENABLED2(ENABLE_MDBG)) {
		if (ptr) {
			hdr = ptr;
			hdr--;
			assert_header(hdr);
		}
		ftr_size = mdbg_get_ftr_size(nmemb * size);
		hdr_size = sizeof(struct mdbg_hdr);
		ptr = hdr;
	}

	ptr = raw_malloc_flags(flags, ptr, hdr_size, ftr_size, alignment, nmemb,
			       size, ctx);

	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
		hdr = ptr;
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
		ptr = hdr;
	}

	return ptr;
}

static struct malloc_ctx *get_ctx(uint32_t flags __maybe_unused)
{
#ifdef CFG_NS_VIRTUALIZATION
	if (flags & MAF_NEX)
		return &nex_malloc_ctx;
#endif
	return &malloc_ctx;
}

static void *mem_alloc(uint32_t flags, void *ptr, size_t alignment,
		       size_t nmemb, size_t size, const char *fname, int lineno)
{
	struct malloc_ctx *ctx = get_ctx(flags);
	uint32_t exceptions = 0;
	void *p = NULL;

	exceptions = malloc_lock(ctx);
	p = mem_alloc_unlocked(flags, ptr, alignment, nmemb, size, fname,
			       lineno, ctx);
	malloc_unlock(ctx, exceptions);

	return p;
}

void free_flags(uint32_t flags, void *ptr)
{
	struct malloc_ctx *ctx = get_ctx(flags);
	uint32_t exceptions = 0;

	exceptions = malloc_lock(ctx);

	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
		struct mdbg_hdr *hdr = ptr;

		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		ptr = hdr;
	}

	raw_free(ptr, ctx, flags & MAF_FREE_WIPE);

	malloc_unlock(ctx, exceptions);
}

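/*
 * Return the start address and size of the user payload of an allocated
 * buffer, skipping over the mdbg header when malloc debugging is enabled.
 */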
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	if (IS_ENABLED2(ENABLE_MDBG)) {
		struct mdbg_hdr *hdr = raw_buf;

		assert(bget_buf_size(hdr) >= hdr->pl_size);
		*size = hdr->pl_size;
		return hdr + 1;
	}

	*size = bget_buf_size(raw_buf);
	return raw_buf;
}

/* For use in raw_malloc_add_pool() below */
#define realloc_unlocked(ctx, ptr, size) \
	mem_alloc_unlocked(MAF_NULL, (ptr), 1, 1, (size), __FILE__, __LINE__, \
			   (ctx))

#ifdef ENABLE_MDBG
void *__mdbg_alloc(uint32_t flags, void *ptr, size_t alignment, size_t nmemb,
		   size_t size, const char *fname, int lineno)
{
	return mem_alloc(flags, ptr, alignment, nmemb, size, fname, lineno);
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#endif

/*
 * If malloc debug is enabled, malloc() and friends are redirected by macros
 * to __mdbg_alloc() etc.
 * We still want to export the standard entry points in case they are
 * referenced by the application, either directly or via external libraries.
 */

#undef malloc
void *malloc(size_t size)
{
	return mem_alloc(MAF_NULL, NULL, 1, 1, size, __FILE__, __LINE__);
}

#undef malloc_flags
void *malloc_flags(uint32_t flags, void *ptr, size_t alignment, size_t size)
{
	return mem_alloc(flags, ptr, alignment, 1, size, __FILE__, __LINE__);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mem_alloc(MAF_ZERO_INIT, NULL, 1, nmemb, size, __FILE__,
			 __LINE__);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mem_alloc(MAF_NULL, ptr, 1, 1, size, __FILE__, __LINE__);
}

#undef memalign
void *memalign(size_t alignment, size_t size)
{
	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
			 __LINE__);
}

#if __STDC_VERSION__ >= 201112L
#undef aligned_alloc
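/*
 * C11 requires the requested size to be an integral multiple of the
 * alignment; returning NULL here is a conservative way of handling other
 * combinations instead of relying on undefined behavior.
 */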
void *aligned_alloc(size_t alignment, size_t size)
{
	if (size % alignment)
		return NULL;

	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
			 __LINE__);
}
#endif /* __STDC_VERSION__ */

void free(void *ptr)
{
	free_flags(MAF_NULL, ptr);
}

void free_wipe(void *ptr)
{
	free_flags(MAF_FREE_WIPE, ptr);
}

static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_add_pool(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);
	bool ret = false;

	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

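/*
 * Register a memory region as a heap pool: align the start and end to
 * SizeQuant, skip regions that are too small (the first pool must be at
 * least MALLOC_INITIAL_POOL_MIN_SIZE bytes), hand the region to bget via
 * bpool() and record it in the context's pool array for later iteration
 * and statistics.
 */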
void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	void *p = NULL;
	size_t l = 0;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
}

bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
				     void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)strip_tag(buf);
	uintptr_t buf_end = buf_start + len;
	size_t n = 0;

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)strip_tag(ctx->pool[n].buf);
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end)
			return true;	/* Wrapping buffers, shouldn't happen */

		if ((buf_start >= pool_start && buf_start < pool_end) ||
		    (buf_end > pool_start && buf_end < pool_end))
			return true;
	}

	return false;
}

bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
					 void *buf, size_t len)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		return false;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b = NULL;
		uint8_t *end_b = NULL;
		size_t s = 0;

		start_b = strip_tag(get_payload_start_size(b, &s));
		end_b = start_b + s;
		if (start_buf >= start_b && end_buf <= end_b)
			return true;
	}

	return false;
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct pta_stats_alloc *stats)
{
	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	stats->free2_sum = ctx->poolset.free2_sum;
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

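/*
 * With CFG_NS_VIRTUALIZATION the nex_* entry points below operate on the
 * separate nexus heap (nex_malloc_ctx above), intended for allocations
 * that must live in memory shared across guest partitions rather than in
 * a single guest's heap.
 */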
#ifdef CFG_NS_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	return mem_alloc(MAF_NEX, NULL, 1, 1, size, __FILE__, __LINE__);
}

void *nex_calloc(size_t nmemb, size_t size)
{
	return mem_alloc(MAF_NEX | MAF_ZERO_INIT, NULL, 1, nmemb, size,
			 __FILE__, __LINE__);
}

void *nex_realloc(void *ptr, size_t size)
{
	return mem_alloc(MAF_NEX, ptr, 1, 1, size, __FILE__, __LINE__);
}

void *nex_memalign(size_t alignment, size_t size)
{
	return mem_alloc(MAF_NEX, NULL, alignment, 1, size, __FILE__, __LINE__);
}

#else /* ENABLE_MDBG */

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

#endif /* ENABLE_MDBG */

void nex_free(void *ptr)
{
	free_flags(MAF_NEX, ptr);
}

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif