// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2025, Linaro Limited.
 */

#define PROTOTYPES

/*
 *  BGET CONFIGURATION
 *  ==================
 */
/* #define BGET_ENABLE_OPTION */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control.  */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <config.h>
#include <malloc.h>
#include <memtag.h>
#include <pta_stats.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib_ext.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

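/*
 * The memset_unchecked()/memcpy_unchecked() helpers below deliberately bypass
 * the instrumented string functions when the core is built with ASan: the
 * allocator has to initialize and copy memory that ASan may currently regard
 * as poisoned (redzones, freed buffers), so internal bookkeeping uses the
 * *_unchecked variants instead.
 */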
#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/unwind.h>

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return memcpy(dst, src, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct pta_stats_alloc mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else  /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif	/* __KERNEL__ */

#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}
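/*
 * The poolset free list is a circular doubly linked list: an empty list is
 * represented by the list head pointing back at itself, which is what the
 * initializer above (and raw_malloc_init_ctx() further down) sets up.
 */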

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_NS_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
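/*
 * Note on bget's size encoding: a buffer header stores a positive bsize for a
 * free buffer and the negated size for an allocated one, which is why the
 * allocated size is recovered as -b->bsize above and why bpool_foreach_pool()
 * below treats a negative bsize as "allocated".
 */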

static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		/*
		 * MEMTAG needs actual allocated size (>= SizeQuant),
		 * unlike ASan which tags only requested bytes. For
		 * malloc(0), bget allocates SizeQuant, so we pass
		 * MAX(requested_size, SizeQuant) to ensure correct tagging.
		 */
		requested_size = MAX(requested_size, SizeQuant);

		sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz + hdr_size);
		return memtag_set_random_tags(buf, sz + hdr_size);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_access(buf, buf + hdr_size + requested_size);
#endif
	return buf;
}

static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

#if defined(__KERNEL__)
	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
#endif
	return buf;
}
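/*
 * Tag life cycle with MEMTAG enabled: maybe_tag_buf() assigns fresh random
 * tags to the whole allocation on the way out of the allocator and
 * maybe_untag_buf() resets the tags to 0 on free, so a stale pointer that
 * still carries the old tag trips a tag-check fault on its next access.
 */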

static void *strip_tag(void *buf)
{
	if (MEMTAG_IS_ENABLED)
		return memtag_strip_tag(buf);
	return buf;
}

static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
#if defined(__KERNEL__)
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
#endif
}

#ifdef BufStats

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, hdr_size, requested_size);
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct pta_stats_alloc *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_get_stats(ctx, stats);
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, hdr_size, requested_size);
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)		      \
	for (bpool_foreach_iterator_init((ctx),(iterator));   \
	     bpool_foreach((ctx),(iterator), (bp));)
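/*
 * Example (sketch): walking every currently allocated buffer in a context.
 *
 *	struct bpool_iterator itr = { };
 *	void *b = NULL;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		DMSG("allocated buffer at %p", b);
 */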

void *raw_malloc_flags(uint32_t flags, void *ptr, size_t hdr_size,
		       size_t ftr_size, size_t alignment, size_t pl_nmemb,
		       size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s = 0;

	raw_malloc_validate_pools(ctx);

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	if ((flags & MAF_ZERO_INIT) && !ptr)
		p = bgetz(alignment, hdr_size, s, &ctx->poolset);
	else
		p = bget(alignment, hdr_size, s, &ctx->poolset);

	if (p && ptr) {
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);
		bufsize new_sz = s + hdr_size;

		if (old_sz < new_sz) {
			memcpy_unchecked(p, old_ptr, old_sz);
			if (flags & MAF_ZERO_INIT)
				memset_unchecked((uint8_t *)p + old_sz, 0,
						 new_sz - old_sz);
		} else {
			memcpy_unchecked(p, old_ptr, new_sz);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	return raw_malloc_return_hook(p, hdr_size, pl_nmemb * pl_size, ctx);
}
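/*
 * Note that the reallocation path above (ptr != NULL) never grows a buffer in
 * place: a new buffer is always acquired first, the old contents are copied
 * over and only then is the old buffer released. If the new allocation fails,
 * the old buffer is left untouched, as realloc() semantics require.
 */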

void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, alignment,
				1, pl_size, ctx);
}

void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, 1, 1,
				pl_size, ctx);
}

void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}

void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_ZERO_INIT, NULL, hdr_size, ftr_size, 1,
				pl_nmemb, pl_size, ctx);
}

void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, ptr, hdr_size, ftr_size, 1, 1,
				pl_size, ctx);
}

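/*
 * Layout of a debug allocation when ENABLE_MDBG is set:
 *
 *   [struct mdbg_hdr][payload, pl_size bytes][padding][uint32_t footer magic]
 *
 * assert_header() checks both magics, which catches overruns just past the
 * payload as well as double or invalid frees (free_flags() clears the magics
 * before releasing the buffer).
 */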
struct mdbg_hdr {
	const char *fname;
	uint16_t line;
#ifdef __LP64__
	uint64_t pad;
#endif
	uint32_t pl_size;
	uint32_t magic;
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}
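/*
 * For example, a 5-byte payload gives ftr_pad = ROUNDUP(5, 4) - 5 = 3, so
 * mdbg_get_ftr_size() returns 7: three bytes of padding followed by the
 * 4-byte footer magic that mdbg_get_footer() locates at the very end.
 */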

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return strip_tag(footer);
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void *mem_alloc_unlocked(uint32_t flags, void *ptr, size_t alignment,
				size_t nmemb, size_t size, const char *fname,
				int lineno, struct malloc_ctx *ctx)
{
	struct mdbg_hdr *hdr = NULL;
	size_t ftr_size = 0;
	size_t hdr_size = 0;

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	static_assert((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	if (IS_ENABLED2(ENABLE_MDBG)) {
		if (ptr) {
			hdr = ptr;
			hdr--;
			assert_header(hdr);
		}
		ftr_size = mdbg_get_ftr_size(nmemb * size);
		hdr_size = sizeof(struct mdbg_hdr);
		ptr = hdr;
	}

	ptr = raw_malloc_flags(flags, ptr, hdr_size, ftr_size, alignment, nmemb,
			       size, ctx);

	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
		hdr = ptr;
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
		ptr = hdr;
	}

	return ptr;
}

static struct malloc_ctx *get_ctx(uint32_t flags __maybe_unused)
{
#ifdef CFG_NS_VIRTUALIZATION
	if (flags & MAF_NEX)
		return &nex_malloc_ctx;
#endif
	return &malloc_ctx;
}

static void *mem_alloc(uint32_t flags, void *ptr, size_t alignment,
		       size_t nmemb, size_t size, const char *fname, int lineno)
{
	struct malloc_ctx *ctx = get_ctx(flags);
	uint32_t exceptions = 0;
	void *p = NULL;

	exceptions = malloc_lock(ctx);
	p = mem_alloc_unlocked(flags, ptr, alignment, nmemb, size, fname,
			       lineno, ctx);
	malloc_unlock(ctx, exceptions);

	return p;
}

void free_flags(uint32_t flags, void *ptr)
{
	struct malloc_ctx *ctx = get_ctx(flags);
	uint32_t exceptions = 0;

	exceptions = malloc_lock(ctx);

	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
		struct mdbg_hdr *hdr = ptr;

		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		ptr = hdr;
	}

	raw_free(ptr, ctx, flags & MAF_FREE_WIPE);

	malloc_unlock(ctx, exceptions);
}
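/*
 * Clearing the header and footer magic above before the buffer is released is
 * what lets assert_header() catch a later double free of the same pointer.
 */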

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	if (IS_ENABLED2(ENABLE_MDBG)) {
		struct mdbg_hdr *hdr = raw_buf;

		assert(bget_buf_size(hdr) >= hdr->pl_size);
		*size = hdr->pl_size;
		return hdr + 1;
	}

	*size = bget_buf_size(raw_buf);
	return raw_buf;
}

/* For use in raw_malloc_add_pool() below */
#define realloc_unlocked(ctx, ptr, size)                                      \
	mem_alloc_unlocked(MAF_NULL, (ptr), 1, 1, (size), __FILE__, __LINE__, \
			   (ctx))

#ifdef ENABLE_MDBG
void *__mdbg_alloc(uint32_t flags, void *ptr, size_t alignment, size_t nmemb,
		   size_t size, const char *fname, int lineno)
{
	return mem_alloc(flags, ptr, alignment, nmemb, size, fname, lineno);
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#endif

/*
 * If malloc debug is enabled, malloc() and friends are redirected by macros
 * to __mdbg_alloc() etc.
 * We still want to export the standard entry points in case they are referenced
 * by the application, either directly or via external libraries.
 */

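/*
 * With ENABLE_MDBG, <malloc.h> wraps the standard names in macros that pass
 * __FILE__ and __LINE__ to __mdbg_alloc(), roughly along these lines (a
 * sketch, see <malloc.h> for the authoritative definitions):
 *
 *	#define malloc(size) \
 *		__mdbg_alloc(MAF_NULL, NULL, 1, 1, (size), __FILE__, __LINE__)
 *
 * which is why each standard definition below starts with an #undef.
 */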
#undef malloc
void *malloc(size_t size)
{
	return mem_alloc(MAF_NULL, NULL, 1, 1, size, __FILE__, __LINE__);
}

#undef malloc_flags
void *malloc_flags(uint32_t flags, void *ptr, size_t alignment, size_t size)
{
	return mem_alloc(flags, ptr, alignment, 1, size, __FILE__, __LINE__);
}

#undef calloc
void *calloc(size_t nmemb, size_t size)
{
	return mem_alloc(MAF_ZERO_INIT, NULL, 1, nmemb, size, __FILE__,
			 __LINE__);
}

#undef realloc
void *realloc(void *ptr, size_t size)
{
	return mem_alloc(MAF_NULL, ptr, 1, 1, size, __FILE__, __LINE__);
}

#undef memalign
void *memalign(size_t alignment, size_t size)
{
	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
			 __LINE__);
}

#if __STDC_VERSION__ >= 201112L
#undef aligned_alloc
void *aligned_alloc(size_t alignment, size_t size)
{
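	/* C11 requires size to be an integral multiple of alignment */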
	if (size % alignment)
		return NULL;

	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
			 __LINE__);
}
#endif /* __STDC_VERSION__ */

void free(void *ptr)
{
	free_flags(MAF_NULL, ptr);
}

void free_wipe(void *ptr)
{
	free_flags(MAF_FREE_WIPE, ptr);
}

static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_add_pool(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);
	bool ret = false;

	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
	return ret;
}

size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}

void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}

void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	void *p = NULL;
	size_t l = 0;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
}
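/*
 * Note that the ctx->pool bookkeeping array above is reallocated from the
 * heap that has just been extended, which is one reason the very first pool
 * must be at least MALLOC_INITIAL_POOL_MIN_SIZE bytes.
 */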

bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
				     void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t)strip_tag(buf);
	uintptr_t buf_end = buf_start + len;
	size_t n = 0;

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)strip_tag(ctx->pool[n].buf);
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end)
			return true;	/* Wrapping buffers, shouldn't happen */

		if ((buf_start >= pool_start && buf_start < pool_end) ||
		    (buf_end > pool_start && buf_end < pool_end))
			return true;
	}

	return false;
}

bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
					 void *buf, size_t len)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		return false;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b = NULL;
		uint8_t *end_b = NULL;
		size_t s = 0;

		start_b = strip_tag(get_payload_start_size(b, &s));
		end_b = start_b + s;
		if (start_buf >= start_b && end_buf <= end_b)
			return true;
	}

	return false;
}

#ifdef CFG_WITH_STATS
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct pta_stats_alloc *stats)
{
	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	stats->free2_sum = ctx->poolset.free2_sum;
}
#endif

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}
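/*
 * Example (sketch, with a hypothetical statically allocated buffer):
 * registering heap storage with the default allocator during early init.
 *
 *	static uint8_t heap[64 * 1024] __aligned(SizeQuant);
 *
 *	malloc_add_pool(heap, sizeof(heap));
 */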

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

#ifdef CFG_NS_VIRTUALIZATION

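/*
 * The nex_* ("nexus") variants below operate on nex_malloc_ctx, the heap
 * used for nexus state shared across guests when NS virtualization is
 * enabled, instead of the default malloc_ctx.
 */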
#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	return mem_alloc(MAF_NEX, NULL, 1, 1, size, __FILE__, __LINE__);
}

void *nex_calloc(size_t nmemb, size_t size)
{
	return mem_alloc(MAF_NEX | MAF_ZERO_INIT, NULL, 1, nmemb, size,
			 __FILE__, __LINE__);
}

void *nex_realloc(void *ptr, size_t size)
{
	return mem_alloc(MAF_NEX, ptr, 1, 1, size, __FILE__, __LINE__);
}

void *nex_memalign(size_t alignment, size_t size)
{
	return mem_alloc(MAF_NEX, NULL, alignment, 1, size, __FILE__, __LINE__);
}

#else  /* ENABLE_MDBG */

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

#endif	/* ENABLE_MDBG */

void nex_free(void *ptr)
{
	free_flags(MAF_NEX, ptr);
}

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

#ifdef BufStats

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif

#endif