// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 *  BGET CONFIGURATION
 *  ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_ALL_OPTIONS
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_ALL_OPTIONS
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control.  */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>

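/*
 * ASan bookkeeping hooks: heap memory is tagged as freed when released
 * and as accessible when handed out, so that stray references into free
 * buffers are caught by the sanitizer.
 */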
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

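/*
 * The TEE core heap is shared by threads on all cores, so it is
 * serialized with a spinlock taken with exceptions masked. A TA
 * instance executes single threaded, so there the lock degrades to a
 * no-op.
 */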
static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else  /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif	/* __KERNEL__ */

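/*
 * Statically define a malloc context whose poolset starts out empty:
 * the freelist header's forward and backward links point back at the
 * header itself, which is bget's representation of an empty doubly
 * linked free list.
 */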
#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef CFG_VIRTUALIZATION
static __nex_data DEFINE_CTX(nex_malloc_ctx);
#endif

#ifdef BufStats

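/*
 * Called at the end of every allocation path. Tracks the high-water
 * mark of allocated bytes and, when an allocation fails, counts the
 * failure and remembers the largest failed request together with the
 * heap usage at that moment.
 */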
static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p __unused,
				   size_t requested_size __unused,
				   struct malloc_ctx *ctx __unused)
{
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

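/*
 * Iteration over all buffers in all registered pools. bget stores an
 * allocated buffer's size negated, a free buffer's size as-is, and
 * terminates each pool with an ESent sentinel.
 */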
struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)		      \
	for (bpool_foreach_iterator_init((ctx),(iterator));   \
	     bpool_foreach((ctx),(iterator), (bp));)

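/*
 * The hdr_size and ftr_size arguments reserve space for the optional
 * mdbg header and footer around the payload; the plain malloc() family
 * passes 0 for both.
 */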
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

static void raw_free(void *ptr, struct malloc_ctx *ctx)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
			size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}

static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn,
			      struct bpoolset *poolset)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
	assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
	bf->ql.flink = &poolset->freelist;
	bf->ql.blink = poolset->freelist.ql.blink;
	poolset->freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}

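/*
 * Turn the range [orig_buf, new_buf) of an allocated buffer back into
 * free space, so that the allocation effectively starts at new_buf.
 * Used by raw_memalign() to release the unaligned head of an
 * over-sized allocation.
 */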
static void brel_before(char *orig_buf, char *new_buf, struct bpoolset *poolset)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to the header of what will become the new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b, poolset);
	}

#ifdef BufStats
	poolset->totalloc -= size;
	assert(poolset->totalloc >= 0);
#endif
}

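/*
 * Trim an allocated buffer down to the size bget would pick for "size"
 * and release the surplus at its end, either by merging it into a
 * following free buffer or by creating a new free block. Used by
 * raw_memalign() to release the tail of an over-sized allocation.
 */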
static void brel_after(char *buf, bufsize size, struct bpoolset *poolset)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/* Set the new size of the buffer */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* Next buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn,
				  poolset);
	}

#ifdef BufStats
	poolset->totalloc -= free_size;
	assert(poolset->totalloc >= 0);
#endif
}

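/*
 * Alignments stricter than SizeQuant are handled by over-allocating:
 * the request is padded by the alignment plus a minimal free block of
 * slack, then the misaligned head is released with brel_before() and
 * the unused tail with brel_after().
 */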
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
			  size_t size, struct malloc_ctx *ctx)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools(ctx);

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size, ctx);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s, &ctx->poolset);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset far enough into the
		 * buffer that what's in front of it can be freed.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p, &ctx->poolset);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size, &ctx->poolset);
out:
	raw_malloc_return_hook((void *)b, size, ctx);

	return (void *)b;
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

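/*
 * With memory debugging enabled each allocation is laid out as
 *
 *   [struct mdbg_hdr][payload][pad][footer magic]
 *
 * The header records the allocation site and carries a magic word, the
 * footer a second magic word, so mdbg_check() can walk the pools and
 * detect buffer over- and underruns.
 */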
struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx);
	}
}

void free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(nmemb * size), nmemb, size,
			 ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx,
				       const char *fname, int lineno,
				       void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size)					\
		gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#else

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

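/*
 * Register [buf, buf + len) as heap memory: the range is trimmed to
 * SizeQuant alignment, handed to bget as a new pool and recorded in the
 * context's pool array for the validation and iteration helpers.
 */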
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t) buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		/* True overlap: the buffer intersects this pool */
		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}

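/*
 * Usage sketch (illustrative only, not compiled): a platform registers
 * its heap once at boot, after which the standard entry points above
 * operate on it. The pool symbol and size below are hypothetical.
 */
#if 0
static uint8_t heap_pool[64 * 1024] __aligned(16);	/* hypothetical */

static void heap_init(void)
{
	malloc_add_pool(heap_pool, sizeof(heap_pool));
}
#endif

/*
 * With CFG_VIRTUALIZATION the TEE core also keeps a nexus heap shared
 * by all guest partitions; the nex_* entry points below are the same
 * wrappers bound to nex_malloc_ctx.
 */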
#ifdef CFG_VIRTUALIZATION

#ifndef ENABLE_MDBG

void *nex_malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_malloc(0, 0, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = realloc_unlocked(&nex_malloc_ctx, ptr, size);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void *nex_memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
	return p;
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	raw_free(ptr, &nex_malloc_ctx);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#else  /* ENABLE_MDBG */

void *nex_mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&nex_malloc_ctx, fname, lineno, size);
}

void *nex_mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&nex_malloc_ctx, fname, lineno, nmemb, size);
}

void *nex_mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&nex_malloc_ctx, fname, lineno, ptr, size);
}

void *nex_mdbg_memalign(const char *fname, int lineno, size_t alignment,
			size_t size)
{
	return gen_mdbg_memalign(&nex_malloc_ctx, fname, lineno, alignment,
				 size);
}

void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}

void nex_free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&nex_malloc_ctx);

	gen_mdbg_free(&nex_malloc_ctx, ptr);
	malloc_unlock(&nex_malloc_ctx, exceptions);
}

#endif	/* ENABLE_MDBG */

void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}

bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}

void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}

void nex_malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}

#endif	/* CFG_VIRTUALIZATION */
1179