1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2015-2025, Linaro Limited.
5  */
6 
7 #define PROTOTYPES
8 
9 /*
10  *  BGET CONFIGURATION
11  *  ==================
12  */
13 /* #define BGET_ENABLE_ALL_OPTIONS */
14 #ifdef BGET_ENABLE_OPTION
15 #define TestProg    20000	/* Generate built-in test program
16 				   if defined.  The value specifies
17 				   how many buffer allocation attempts
18 				   the test program should make. */
19 #endif
20 
21 
22 #ifdef __LP64__
23 #define SizeQuant   16
24 #endif
25 #ifdef __ILP32__
26 #define SizeQuant   8
27 #endif
28 				/* Buffer allocation size quantum:
29 				   all buffers allocated are a
30 				   multiple of this size.  This
31 				   MUST be a power of two. */
32 
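/*
 * Example (roughly speaking): with SizeQuant 16 on LP64 builds, a 20-byte
 * request is rounded up to a 32-byte payload before bget adds its own
 * header, so small allocations always consume whole 16-byte quanta.
 */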
33 #ifdef BGET_ENABLE_OPTION
34 #define BufDump     1		/* Define this symbol to enable the
35 				   bpoold() function which dumps the
36 				   buffers in a buffer pool. */
37 
38 #define BufValid    1		/* Define this symbol to enable the
39 				   bpoolv() function for validating
40 				   a buffer pool. */
41 
42 #define DumpData    1		/* Define this symbol to enable the
43 				   bufdump() function which allows
44 				   dumping the contents of an allocated
45 				   or free buffer. */
46 
47 #define BufStats    1		/* Define this symbol to enable the
48 				   bstats() function which calculates
49 				   the total free space in the buffer
50 				   pool, the largest available
51 				   buffer, and the total space
52 				   currently allocated. */
53 
54 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
55 				   pattern of garbage to trip up
56 				   miscreants who attempt to use
57 				   pointers into released buffers. */
58 
59 #define BestFit     1		/* Use a best fit algorithm when
60 				   searching for space for an
61 				   allocation request.  This uses
62 				   memory more efficiently, but
63 				   allocation will be much slower. */
64 
65 #define BECtl       1		/* Define this symbol to enable the
66 				   bectl() function for automatic
67 				   pool space control.  */
68 #endif
69 
70 #ifdef MEM_DEBUG
71 #undef NDEBUG
72 #define DumpData    1
73 #define BufValid    1
74 #define FreeWipe    1
75 #endif
76 
77 #ifdef CFG_WITH_STATS
78 #define BufStats    1
79 #endif
80 
81 #include <compiler.h>
82 #include <config.h>
83 #include <malloc.h>
84 #include <memtag.h>
85 #include <pta_stats.h>
86 #include <stdbool.h>
87 #include <stdint.h>
88 #include <stdlib_ext.h>
89 #include <stdlib.h>
90 #include <string.h>
91 #include <trace.h>
92 #include <util.h>
93 
94 #if defined(__KERNEL__)
95 /* Compiling for TEE Core */
96 #include <kernel/asan.h>
97 #include <kernel/spinlock.h>
98 #include <kernel/unwind.h>
99 
100 static void *memset_unchecked(void *s, int c, size_t n)
101 {
102 	return asan_memset_unchecked(s, c, n);
103 }
104 
105 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
106 					     size_t n)
107 {
108 	return asan_memcpy_unchecked(dst, src, n);
109 }
110 
111 #else /*__KERNEL__*/
112 /* Compiling for TA */
113 
114 static void *memset_unchecked(void *s, int c, size_t n)
115 {
116 	return memset(s, c, n);
117 }
118 
119 static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
120 					     size_t n)
121 {
122 	return memcpy(dst, src, n);
123 }
124 
125 #endif /*__KERNEL__*/
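/*
 * The *_unchecked() helpers above exist so that bget's own bookkeeping can
 * touch memory the core build deliberately keeps poisoned under KASAN
 * (free-list headers, FreeWipe patterns in released buffers) without
 * tripping the sanitizer; in the TA build they are plain memset()/memcpy().
 */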
126 
127 #include "bget.c"		/* this is ugly, but this is bget */
128 
129 struct malloc_pool {
130 	void *buf;
131 	size_t len;
132 };
133 
134 struct malloc_ctx {
135 	struct bpoolset poolset;
136 	struct malloc_pool *pool;
137 	size_t pool_len;
138 #ifdef BufStats
139 	struct pta_stats_alloc mstats;
140 #endif
141 #ifdef __KERNEL__
142 	unsigned int spinlock;
143 #endif
144 };
145 
146 #ifdef __KERNEL__
147 
148 static uint32_t malloc_lock(struct malloc_ctx *ctx)
149 {
150 	return cpu_spin_lock_xsave(&ctx->spinlock);
151 }
152 
153 static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
154 {
155 	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
156 }
157 
158 #else  /* __KERNEL__ */
159 
160 static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
161 {
162 	return 0;
163 }
164 
165 static void malloc_unlock(struct malloc_ctx *ctx __unused,
166 			  uint32_t exceptions __unused)
167 {
168 }
169 
170 #endif	/* __KERNEL__ */
171 
172 #define DEFINE_CTX(name) struct malloc_ctx name =		\
173 	{ .poolset = { .freelist = { {0, 0},			\
174 			{&name.poolset.freelist,		\
175 			 &name.poolset.freelist}}}}
176 
177 static DEFINE_CTX(malloc_ctx);
178 
179 #ifdef CFG_NS_VIRTUALIZATION
180 static __nex_data DEFINE_CTX(nex_malloc_ctx);
181 #endif
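/*
 * DEFINE_CTX() statically initializes a context with an empty, circular
 * free list whose flink/blink point back at the list head itself, which is
 * the same state raw_malloc_init_ctx() establishes at run time. Memory only
 * becomes available once a pool is registered with malloc_add_pool() (or
 * nex_malloc_add_pool() for the nexus heap).
 */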
182 
183 static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
184 {
185 #if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
186 	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
187 	print_kernel_stack();
188 #endif
189 }
190 
191 /* Most of the stuff in this function is copied from bgetr() in bget.c */
192 static __maybe_unused bufsize bget_buf_size(void *buf)
193 {
194 	bufsize osize;          /* Old size of buffer */
195 	struct bhead *b;
196 
197 	b = BH(((char *)buf) - sizeof(struct bhead));
198 	osize = -b->bsize;
199 #ifdef BECtl
200 	if (osize == 0) {
201 		/*  Buffer acquired directly through acqfcn. */
202 		struct bdhead *bd;
203 
204 		bd = BDH(((char *)buf) - sizeof(struct bdhead));
205 		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
206 	} else
207 #endif
208 		osize -= sizeof(struct bhead);
209 	assert(osize > 0);
210 	return osize;
211 }
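/*
 * Note: bget stores bsize negated for allocated buffers, so the value
 * returned above is the full usable size of the underlying bget buffer,
 * including any mdbg header/footer space and rounding added on top of what
 * the caller originally asked for.
 */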
212 
213 static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size)
214 {
215 	if (!buf)
216 		return NULL;
217 
218 	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);
219 
220 	if (MEMTAG_IS_ENABLED) {
221 		size_t sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);
222 
223 		/*
224 		 * The allocated buffer can be larger than requested when
225 		 * allocating with memalign(), but we should never tag more
226 		 * than was allocated.
227 		 */
228 		assert(bget_buf_size(buf) >= sz + hdr_size);
229 		return memtag_set_random_tags(buf, sz + hdr_size);
230 	}
231 
232 #if defined(__KERNEL__)
233 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
234 		asan_tag_access(buf, buf + hdr_size + requested_size);
235 #endif
236 	return buf;
237 }
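/*
 * With MTE enabled, the pointer returned above carries a freshly generated
 * random tag and the granules covering the header plus the granule-rounded
 * payload are retagged to match, so stray accesses through stale pointers
 * fault. Without MTE but with KASAN, the region is instead marked
 * addressable again after having been poisoned while free.
 */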
238 
239 static void *maybe_untag_buf(void *buf)
240 {
241 	if (!buf)
242 		return NULL;
243 
244 	if (MEMTAG_IS_ENABLED) {
245 		size_t sz = 0;
246 
247 		memtag_assert_tag(buf); /* Trying to catch double free early */
248 		sz = bget_buf_size(memtag_strip_tag(buf));
249 		return memtag_set_tags(buf, sz, 0);
250 	}
251 
252 #if defined(__KERNEL__)
253 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
254 		asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));
255 #endif
256 	return buf;
257 }
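/*
 * On free the inverse happens: the tag embedded in the pointer is checked
 * first (a mismatch typically means a double free or a corrupted pointer),
 * then the whole buffer is retagged with tag 0 before it is handed back to
 * bget. The KASAN path poisons the freed range instead.
 */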
258 
259 static void *strip_tag(void *buf)
260 {
261 	if (MEMTAG_IS_ENABLED)
262 		return memtag_strip_tag(buf);
263 	return buf;
264 }
265 
266 static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
267 {
268 #if defined(__KERNEL__)
269 	asan_tag_heap_free(buf, (uint8_t *)buf + len);
270 #endif
271 }
272 
273 #ifdef BufStats
274 
275 static void *raw_malloc_return_hook(void *p, size_t hdr_size,
276 				    size_t requested_size,
277 				    struct malloc_ctx *ctx)
278 {
279 	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
280 		ctx->mstats.max_allocated = ctx->poolset.totalloc;
281 
282 	if (!p) {
283 		ctx->mstats.num_alloc_fail++;
284 		print_oom(requested_size, ctx);
285 		if (requested_size > ctx->mstats.biggest_alloc_fail) {
286 			ctx->mstats.biggest_alloc_fail = requested_size;
287 			ctx->mstats.biggest_alloc_fail_used =
288 				ctx->poolset.totalloc;
289 		}
290 	}
291 
292 	return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size));
293 }
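/*
 * The hook above both updates the allocation statistics (high watermark and
 * failure counters) and applies tagging. Passing MAX(SizeQuant,
 * requested_size) reflects that bget never hands out less than one
 * SizeQuant quantum, so at least that much of the buffer is tagged (or
 * unpoisoned) even for zero- or one-byte requests.
 */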
294 
295 static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
296 {
297 	uint32_t exceptions = malloc_lock(ctx);
298 
299 	ctx->mstats.max_allocated = 0;
300 	ctx->mstats.num_alloc_fail = 0;
301 	ctx->mstats.biggest_alloc_fail = 0;
302 	ctx->mstats.biggest_alloc_fail_used = 0;
303 	malloc_unlock(ctx, exceptions);
304 }
305 
306 void malloc_reset_stats(void)
307 {
308 	gen_malloc_reset_stats(&malloc_ctx);
309 }
310 
311 static void gen_malloc_get_stats(struct malloc_ctx *ctx,
312 				 struct pta_stats_alloc *stats)
313 {
314 	uint32_t exceptions = malloc_lock(ctx);
315 
316 	raw_malloc_get_stats(ctx, stats);
317 	malloc_unlock(ctx, exceptions);
318 }
319 
320 void malloc_get_stats(struct pta_stats_alloc *stats)
321 {
322 	gen_malloc_get_stats(&malloc_ctx, stats);
323 }
324 
325 #else /* BufStats */
326 
327 static void *raw_malloc_return_hook(void *p, size_t hdr_size,
328 				    size_t requested_size,
329 				    struct malloc_ctx *ctx)
330 {
331 	if (!p)
332 		print_oom(requested_size, ctx);
333 
334 	return maybe_tag_buf(p, hdr_size, MAX(SizeQuant, requested_size));
335 }
336 
337 #endif /* BufStats */
338 
339 #ifdef BufValid
340 static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
341 {
342 	size_t n;
343 
344 	for (n = 0; n < ctx->pool_len; n++)
345 		bpoolv(ctx->pool[n].buf);
346 }
347 #else
348 static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
349 {
350 }
351 #endif
352 
353 struct bpool_iterator {
354 	struct bfhead *next_buf;
355 	size_t pool_idx;
356 };
357 
358 static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
359 					struct bpool_iterator *iterator)
360 {
361 	iterator->pool_idx = 0;
362 	iterator->next_buf = BFH(ctx->pool[0].buf);
363 }
364 
365 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
366 		size_t *len, bool *isfree)
367 {
368 	struct bfhead *b = iterator->next_buf;
369 	bufsize bs = b->bh.bsize;
370 
371 	if (bs == ESent)
372 		return false;
373 
374 	if (bs < 0) {
375 		/* Allocated buffer */
376 		bs = -bs;
377 
378 		*isfree = false;
379 	} else {
380 		/* Free Buffer */
381 		*isfree = true;
382 
383 		/* Assert that the free list links are intact */
384 		assert(b->ql.blink->ql.flink == b);
385 		assert(b->ql.flink->ql.blink == b);
386 	}
387 
388 	*buf = (uint8_t *)b + sizeof(struct bhead);
389 	*len = bs - sizeof(struct bhead);
390 
391 	iterator->next_buf = BFH((uint8_t *)b + bs);
392 	return true;
393 }
394 
395 static bool bpool_foreach(struct malloc_ctx *ctx,
396 			  struct bpool_iterator *iterator, void **buf)
397 {
398 	while (true) {
399 		size_t len;
400 		bool isfree;
401 
402 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
403 			if (isfree)
404 				continue;
405 			return true;
406 		}
407 
408 		if ((iterator->pool_idx + 1) >= ctx->pool_len)
409 			return false;
410 
411 		iterator->pool_idx++;
412 		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
413 	}
414 }
415 
416 /* Convenience macro for looping over all allocated buffers */
417 #define BPOOL_FOREACH(ctx, iterator, bp)		      \
418 	for (bpool_foreach_iterator_init((ctx),(iterator));   \
419 	     bpool_foreach((ctx),(iterator), (bp));)
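/*
 * Illustrative use (mirrors gen_mdbg_check() further down); the callback
 * name is hypothetical:
 *
 *	struct bpool_iterator itr = { };
 *	void *b = NULL;
 *
 *	BPOOL_FOREACH(ctx, &itr, &b)
 *		process_allocated_buffer(b);
 */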
420 
421 void *raw_malloc_flags(uint32_t flags, void *ptr, size_t hdr_size,
422 		       size_t ftr_size, size_t alignment, size_t pl_nmemb,
423 		       size_t pl_size, struct malloc_ctx *ctx)
424 {
425 	void *p = NULL;
426 	bufsize s = 0;
427 
428 	raw_malloc_validate_pools(ctx);
429 
430 	if (!alignment || !IS_POWER_OF_TWO(alignment))
431 		return NULL;
432 
433 	/* Compute total size, excluding hdr_size */
434 	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
435 		goto out;
436 	if (ADD_OVERFLOW(s, ftr_size, &s))
437 		goto out;
438 
439 	/* BGET doesn't like 0 sized allocations */
440 	if (!s)
441 		s++;
442 
443 	if ((flags & MAF_ZERO_INIT) && !ptr)
444 		p = bgetz(alignment, hdr_size, s, &ctx->poolset);
445 	else
446 		p = bget(alignment, hdr_size, s, &ctx->poolset);
447 
448 	if (p && ptr) {
449 		void *old_ptr = maybe_untag_buf(ptr);
450 		bufsize old_sz = bget_buf_size(old_ptr);
451 		bufsize new_sz = s + hdr_size;
452 
453 		if (old_sz < new_sz) {
454 			memcpy_unchecked(p, old_ptr, old_sz);
455 			if (flags & MAF_ZERO_INIT)
456 				memset_unchecked((uint8_t *)p + old_sz, 0,
457 						 new_sz - old_sz);
458 		} else {
459 			memcpy_unchecked(p, old_ptr, new_sz);
460 		}
461 
462 		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
463 	}
464 out:
465 	return raw_malloc_return_hook(p, hdr_size, pl_nmemb * pl_size, ctx);
466 }
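/*
 * Note on the realloc path above: when ptr is non-NULL a new buffer is
 * always allocated, the old contents (including the hdr_size prefix) are
 * copied over and the old buffer is released; there is no in-place grow or
 * shrink. If the new allocation fails, the old buffer is left untouched,
 * matching realloc() semantics.
 */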
467 
468 void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
469 		   size_t pl_size, struct malloc_ctx *ctx)
470 {
471 	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, alignment,
472 				1, pl_size, ctx);
473 }
474 
475 void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
476 		 struct malloc_ctx *ctx)
477 {
478 	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, 1, 1,
479 				pl_size, ctx);
480 }
481 
482 void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
483 {
484 	raw_malloc_validate_pools(ctx);
485 
486 	if (ptr)
487 		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
488 }
489 
490 void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
491 		 size_t pl_size, struct malloc_ctx *ctx)
492 {
493 	return raw_malloc_flags(MAF_ZERO_INIT, NULL, hdr_size, ftr_size, 1,
494 				pl_nmemb, pl_size, ctx);
495 }
496 
497 void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
498 		  size_t pl_size, struct malloc_ctx *ctx)
499 {
500 	return raw_malloc_flags(MAF_NULL, ptr, hdr_size, ftr_size, 1, 1,
501 				pl_size, ctx);
502 }
503 
504 struct mdbg_hdr {
505 	const char *fname;
506 	uint16_t line;
507 #ifdef __LP64__
508 	uint64_t pad;
509 #endif
510 	uint32_t pl_size;
511 	uint32_t magic;
512 };
513 
514 #define MDBG_HEADER_MAGIC	0xadadadad
515 #define MDBG_FOOTER_MAGIC	0xecececec
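/*
 * With malloc debugging enabled each allocation is laid out as:
 *
 *   [ struct mdbg_hdr | payload (pl_size bytes) | pad | uint32_t footer ]
 *
 * mdbg_get_ftr_size() pads the payload up to a uint32_t boundary so the
 * MDBG_FOOTER_MAGIC word sits immediately after it; assert_header() checks
 * both magic values to detect header or footer corruption.
 */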
516 
517 static size_t mdbg_get_ftr_size(size_t pl_size)
518 {
519 	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;
520 
521 	return ftr_pad + sizeof(uint32_t);
522 }
523 
524 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
525 {
526 	uint32_t *footer;
527 
528 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
529 			      mdbg_get_ftr_size(hdr->pl_size));
530 	footer--;
531 	return strip_tag(footer);
532 }
533 
534 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
535 		int lineno, size_t pl_size)
536 {
537 	uint32_t *footer;
538 
539 	hdr->fname = fname;
540 	hdr->line = lineno;
541 	hdr->pl_size = pl_size;
542 	hdr->magic = MDBG_HEADER_MAGIC;
543 
544 	footer = mdbg_get_footer(hdr);
545 	*footer = MDBG_FOOTER_MAGIC;
546 }
547 
548 static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
549 {
550 	assert(hdr->magic == MDBG_HEADER_MAGIC);
551 	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
552 }
553 
554 static void *mem_alloc_unlocked(uint32_t flags, void *ptr, size_t alignment,
555 				size_t nmemb, size_t size, const char *fname,
556 				int lineno, struct malloc_ctx *ctx)
557 {
558 	struct mdbg_hdr *hdr = NULL;
559 	size_t ftr_size = 0;
560 	size_t hdr_size = 0;
561 
562 	/*
563 	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
564 	 */
565 	static_assert((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);
566 
567 	if (IS_ENABLED2(ENABLE_MDBG)) {
568 		if (ptr) {
569 			hdr = ptr;
570 			hdr--;
571 			assert_header(hdr);
572 		}
573 		ftr_size = mdbg_get_ftr_size(nmemb * size);
574 		hdr_size = sizeof(struct mdbg_hdr);
575 		ptr = hdr;
576 	}
577 
578 	ptr = raw_malloc_flags(flags, ptr, hdr_size, ftr_size, alignment, nmemb,
579 			       size, ctx);
580 
581 	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
582 		hdr = ptr;
583 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
584 		hdr++;
585 		ptr = hdr;
586 	}
587 
588 	return ptr;
589 }
590 
591 static struct malloc_ctx *get_ctx(uint32_t flags __maybe_unused)
592 {
593 #ifdef CFG_NS_VIRTUALIZATION
594 	if (flags & MAF_NEX)
595 		return &nex_malloc_ctx;
596 #endif
597 	return &malloc_ctx;
598 }
599 
600 static void *mem_alloc(uint32_t flags, void *ptr, size_t alignment,
601 		       size_t nmemb, size_t size, const char *fname, int lineno)
602 {
603 	struct malloc_ctx *ctx = get_ctx(flags);
604 	uint32_t exceptions = 0;
605 	void *p = NULL;
606 
607 	exceptions = malloc_lock(ctx);
608 	p = mem_alloc_unlocked(flags, ptr, alignment, nmemb, size, fname,
609 			       lineno, ctx);
610 	malloc_unlock(ctx, exceptions);
611 
612 	return p;
613 }
614 
615 void free_flags(uint32_t flags, void *ptr)
616 {
617 	struct malloc_ctx *ctx = get_ctx(flags);
618 	uint32_t exceptions = 0;
619 
620 	exceptions = malloc_lock(ctx);
621 
622 	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
623 		struct mdbg_hdr *hdr = ptr;
624 
625 		hdr--;
626 		assert_header(hdr);
627 		hdr->magic = 0;
628 		*mdbg_get_footer(hdr) = 0;
629 		ptr = hdr;
630 	}
631 
632 	raw_free(ptr, ctx, flags & MAF_FREE_WIPE);
633 
634 	malloc_unlock(ctx, exceptions);
635 }
636 
637 static void *get_payload_start_size(void *raw_buf, size_t *size)
638 {
639 	if (IS_ENABLED2(ENABLE_MDBG)) {
640 		struct mdbg_hdr *hdr = raw_buf;
641 
642 		assert(bget_buf_size(hdr) >= hdr->pl_size);
643 		*size = hdr->pl_size;
644 		return hdr + 1;
645 	}
646 
647 	*size = bget_buf_size(raw_buf);
648 	return raw_buf;
649 }
650 
651 /* For use in raw_malloc_add_pool() below */
652 #define realloc_unlocked(ctx, ptr, size)                                      \
653 	mem_alloc_unlocked(MAF_NULL, (ptr), 1, 1, (size), __FILE__, __LINE__, \
654 			   (ctx))
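/*
 * The unlocked variant is needed because raw_malloc_add_pool() grows the
 * pool bookkeeping array while the context lock may already be held
 * (gen_malloc_add_pool() takes it before calling down); using the locking
 * allocation path there could deadlock on the non-recursive spinlock in the
 * core build.
 */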
655 
656 #ifdef ENABLE_MDBG
657 void *__mdbg_alloc(uint32_t flags, void *ptr, size_t alignment, size_t nmemb,
658 		   size_t size, const char *fname, int lineno)
659 {
660 	return mem_alloc(flags, ptr, alignment, nmemb, size, fname, lineno);
661 }
662 
663 static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
664 {
665 	struct bpool_iterator itr;
666 	void *b;
667 	uint32_t exceptions = malloc_lock(ctx);
668 
669 	raw_malloc_validate_pools(ctx);
670 
671 	BPOOL_FOREACH(ctx, &itr, &b) {
672 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
673 
674 		assert_header(hdr);
675 
676 		if (bufdump > 0) {
677 			const char *fname = hdr->fname;
678 
679 			if (!fname)
680 				fname = "unknown";
681 
682 			IMSG("buffer: %d bytes %s:%d",
683 				hdr->pl_size, fname, hdr->line);
684 		}
685 	}
686 
687 	malloc_unlock(ctx, exceptions);
688 }
689 
690 void mdbg_check(int bufdump)
691 {
692 	gen_mdbg_check(&malloc_ctx, bufdump);
693 }
694 #endif
695 
696 /*
697  * If malloc debug is enabled, malloc() and friends are redirected by macros
698  * to __mdbg_alloc() etc.
699  * We still want to export the standard entry points in case they are referenced
700  * by the application, either directly or via external libraries.
701  */
702 
703 #undef malloc
704 void *malloc(size_t size)
705 {
706 	return mem_alloc(MAF_NULL, NULL, 1, 1, size, __FILE__, __LINE__);
707 }
708 
709 #undef malloc_flags
710 void *malloc_flags(uint32_t flags, void *ptr, size_t alignment, size_t size)
711 {
712 	return mem_alloc(flags, ptr, alignment, 1, size, __FILE__, __LINE__);
713 }
714 
715 #undef calloc
716 void *calloc(size_t nmemb, size_t size)
717 {
718 	return mem_alloc(MAF_ZERO_INIT, NULL, 1, nmemb, size, __FILE__,
719 			 __LINE__);
720 }
721 
722 #undef realloc
723 void *realloc(void *ptr, size_t size)
724 {
725 	return mem_alloc(MAF_NULL, ptr, 1, 1, size, __FILE__, __LINE__);
726 }
727 
728 #undef memalign
729 void *memalign(size_t alignment, size_t size)
730 {
731 	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
732 			 __LINE__);
733 }
734 
735 #if __STDC_VERSION__ >= 201112L
736 #undef aligned_alloc
737 void *aligned_alloc(size_t alignment, size_t size)
738 {
739 	if (size % alignment)
740 		return NULL;
741 
742 	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
743 			 __LINE__);
744 }
745 #endif /* __STDC_VERSION__ */
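/*
 * aligned_alloc() above follows C11: the size must be an integral multiple
 * of the alignment, otherwise the request is rejected with NULL rather than
 * silently rounded.
 */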
746 
747 void free(void *ptr)
748 {
749 	free_flags(MAF_NULL, ptr);
750 }
751 
752 void free_wipe(void *ptr)
753 {
754 	free_flags(MAF_FREE_WIPE, ptr);
755 }
756 
757 static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
758 {
759 	uint32_t exceptions = malloc_lock(ctx);
760 
761 	raw_malloc_add_pool(ctx, buf, len);
762 	malloc_unlock(ctx, exceptions);
763 }
764 
765 static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
766 						void *buf, size_t len)
767 {
768 	uint32_t exceptions = malloc_lock(ctx);
769 	bool ret = false;
770 
771 	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
772 	malloc_unlock(ctx, exceptions);
773 
774 	return ret;
775 }
776 
777 static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
778 					    void *buf, size_t len)
779 {
780 	bool ret = false;
781 	uint32_t exceptions = malloc_lock(ctx);
782 
783 	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
784 	malloc_unlock(ctx, exceptions);
785 	return ret;
786 }
787 
788 size_t raw_malloc_get_ctx_size(void)
789 {
790 	return sizeof(struct malloc_ctx);
791 }
792 
793 void raw_malloc_init_ctx(struct malloc_ctx *ctx)
794 {
795 	memset(ctx, 0, sizeof(*ctx));
796 	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
797 	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
798 }
799 
800 void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
801 {
802 	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
803 	uintptr_t start = (uintptr_t)buf;
804 	uintptr_t end = start + len;
805 	void *p = NULL;
806 	size_t l = 0;
807 
808 	start = ROUNDUP(start, SizeQuant);
809 	end = ROUNDDOWN(end, SizeQuant);
810 
811 	if (start > end || (end - start) < min_len) {
812 		DMSG("Skipping too small pool");
813 		return;
814 	}
815 
816 	/* First pool requires a bigger size */
817 	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
818 		DMSG("Skipping too small initial pool");
819 		return;
820 	}
821 
822 	tag_asan_free((void *)start, end - start);
823 	bpool((void *)start, end - start, &ctx->poolset);
824 	l = ctx->pool_len + 1;
825 	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
826 	assert(p);
827 	ctx->pool = p;
828 	ctx->pool[ctx->pool_len].buf = (void *)start;
829 	ctx->pool[ctx->pool_len].len = end - start;
830 #ifdef BufStats
831 	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
832 #endif
833 	ctx->pool_len = l;
834 }
835 
836 bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
837 				     void *buf, size_t len)
838 {
839 	uintptr_t buf_start = (uintptr_t)strip_tag(buf);
840 	uintptr_t buf_end = buf_start + len;
841 	size_t n = 0;
842 
843 	raw_malloc_validate_pools(ctx);
844 
845 	for (n = 0; n < ctx->pool_len; n++) {
846 		uintptr_t pool_start = (uintptr_t)strip_tag(ctx->pool[n].buf);
847 		uintptr_t pool_end = pool_start + ctx->pool[n].len;
848 
849 		if (buf_start > buf_end || pool_start > pool_end)
850 			return true;	/* Wrapping buffers, shouldn't happen */
851 
852 		if ((buf_start >= pool_start && buf_start < pool_end) ||
853 		    (buf_end > pool_start && buf_end < pool_end))
854 			return true;
855 	}
856 
857 	return false;
858 }
859 
860 bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
861 					 void *buf, size_t len)
862 {
863 	struct bpool_iterator itr = { };
864 	void *b = NULL;
865 	uint8_t *start_buf = strip_tag(buf);
866 	uint8_t *end_buf = start_buf + len;
867 
868 	raw_malloc_validate_pools(ctx);
869 
870 	/* Check for wrapping */
871 	if (start_buf > end_buf)
872 		return false;
873 
874 	BPOOL_FOREACH(ctx, &itr, &b) {
875 		uint8_t *start_b = NULL;
876 		uint8_t *end_b = NULL;
877 		size_t s = 0;
878 
879 		start_b = strip_tag(get_payload_start_size(b, &s));
880 		end_b = start_b + s;
881 		if (start_buf >= start_b && end_buf <= end_b)
882 			return true;
883 	}
884 
885 	return false;
886 }
887 
888 #ifdef CFG_WITH_STATS
889 void raw_malloc_get_stats(struct malloc_ctx *ctx, struct pta_stats_alloc *stats)
890 {
891 	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
892 	stats->allocated = ctx->poolset.totalloc;
893 }
894 #endif
895 
896 void malloc_add_pool(void *buf, size_t len)
897 {
898 	gen_malloc_add_pool(&malloc_ctx, buf, len);
899 }
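/*
 * Illustrative use only: a platform or runtime typically registers its heap
 * once during early initialization, roughly as below ("ta_heap" is a
 * hypothetical buffer, not a symbol defined in this file):
 *
 *	static uint8_t ta_heap[64 * 1024] __aligned(SizeQuant);
 *
 *	malloc_add_pool(ta_heap, sizeof(ta_heap));
 */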
900 
901 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
902 {
903 	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
904 }
905 
906 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
907 {
908 	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
909 }
910 
911 #ifdef CFG_NS_VIRTUALIZATION
912 
913 #ifndef ENABLE_MDBG
914 
915 void *nex_malloc(size_t size)
916 {
917 	return mem_alloc(MAF_NEX, NULL, 1, 1, size, __FILE__, __LINE__);
918 }
919 
920 void *nex_calloc(size_t nmemb, size_t size)
921 {
922 	return mem_alloc(MAF_NEX | MAF_ZERO_INIT, NULL, 1, nmemb, size,
923 			 __FILE__, __LINE__);
924 }
925 
926 void *nex_realloc(void *ptr, size_t size)
927 {
928 	return mem_alloc(MAF_NEX, ptr, 1, 1, size, __FILE__, __LINE__);
929 }
930 
931 void *nex_memalign(size_t alignment, size_t size)
932 {
933 	return mem_alloc(MAF_NEX, NULL, alignment, 1, size, __FILE__, __LINE__);
934 }
935 
936 #else  /* ENABLE_MDBG */
937 
938 void nex_mdbg_check(int bufdump)
939 {
940 	gen_mdbg_check(&nex_malloc_ctx, bufdump);
941 }
942 
943 #endif	/* ENABLE_MDBG */
944 
945 void nex_free(void *ptr)
946 {
947 	free_flags(MAF_NEX, ptr);
948 }
949 
950 void nex_malloc_add_pool(void *buf, size_t len)
951 {
952 	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
953 }
954 
955 bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
956 {
957 	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
958 }
959 
960 bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
961 {
962 	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
963 }
964 
965 #ifdef BufStats
966 
967 void nex_malloc_reset_stats(void)
968 {
969 	gen_malloc_reset_stats(&nex_malloc_ctx);
970 }
971 
972 void nex_malloc_get_stats(struct pta_stats_alloc *stats)
973 {
974 	gen_malloc_get_stats(&nex_malloc_ctx, stats);
975 }
976 
977 #endif
978 
979 #endif
980