// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 *  BGET CONFIGURATION
 *  ==================
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_ALL_OPTIONS
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif

#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_ALL_OPTIONS
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control. */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <inttypes.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

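/*
 * When the TEE core is built with address sanitizing support
 * (CFG_CORE_SANITIZE_KADDRESS), freed heap memory is tagged as
 * inaccessible so stray accesses through stale pointers are caught,
 * and memset() on such memory has to bypass the instrumentation.
 * For TAs the corresponding hooks below collapse to no-ops and a
 * plain memset().
 */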
#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>

static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

struct malloc_ctx {
	struct bpoolset poolset;
	struct malloc_pool *pool;
	size_t pool_len;
#ifdef BufStats
	struct malloc_stats mstats;
#endif
#ifdef __KERNEL__
	unsigned int spinlock;
#endif
};

#ifdef __KERNEL__

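/*
 * In the core a heap is shared by all threads, so each entry point
 * masks exceptions and takes the per-context spinlock. A TA instance
 * runs single threaded, so the user mode variants can be no-ops.
 */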
static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}

static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}

#else  /* __KERNEL__ */

static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}

static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}

#endif	/* __KERNEL__ */

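/*
 * DEFINE_CTX() statically initializes a context whose free list is
 * empty: the list head gets a zero-sized bhead ({0, 0}) and its
 * flink/blink pointers refer back to the head itself, forming the
 * empty circular doubly linked list bget() expects.
 */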
#define DEFINE_CTX(name) struct malloc_ctx name =		\
	{ .poolset = { .freelist = { {0, 0},			\
			{&name.poolset.freelist,		\
			 &name.poolset.freelist}}}}

static DEFINE_CTX(malloc_ctx);

#ifdef BufStats

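/*
 * Called at the end of each raw_*() allocator with the pointer about
 * to be returned (or NULL): tracks the high-water mark of allocated
 * bytes and, on failure, the largest request that could not be
 * satisfied together with how much was allocated at that point.
 */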
static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}
}

static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}

void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}

static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	memcpy(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	malloc_unlock(ctx, exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p __unused,
				   size_t requested_size __unused,
				   struct malloc_ctx *ctx __unused)
{
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif

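/*
 * bget lays out the buffers of a pool back to back, each prefixed by
 * a struct bhead whose bsize is negative for allocated buffers,
 * positive for free buffers and ESent for the sentinel terminating
 * the pool. The iterator below walks that layout buffer by buffer,
 * pool by pool.
 */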
struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct malloc_ctx *ctx,
			  struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= ctx->pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(ctx, iterator, bp)		      \
	for (bpool_foreach_iterator_init((ctx),(iterator));   \
	     bpool_foreach((ctx),(iterator), (bp));)

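/*
 * The raw_*() functions reserve hdr_size bytes for a caller supplied
 * header in front of the payload and ftr_size bytes for a footer
 * behind it (both are zero in the non-debug build):
 *
 *   returned pointer
 *   v
 *   [ hdr | payload (pl_size) | ftr ]
 *
 * bget() additionally prepends its own struct bhead bookkeeping.
 */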
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, ctx);

	return ptr;
}

static void raw_free(void *ptr, struct malloc_ctx *ctx)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(ptr, &ctx->poolset);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
			size_t pl_size, struct malloc_ctx *ctx)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools(ctx);

	/* Compute total size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s, &ctx->poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, ctx);

	return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools(ctx);

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s, &ctx->poolset);
out:
	raw_malloc_return_hook(p, pl_size, ctx);

	return p;
}

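/*
 * Carve out a free block of `size' bytes ending exactly where the
 * allocated block headed by bn begins, and link it onto the poolset
 * free list:
 *
 *   bf                    bn
 *   |<------- size ------>|
 *   [ new free block      ][ allocated block ... ]
 */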
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn,
			      struct bpoolset *poolset)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have a free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
	assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
	bf->ql.flink = &poolset->freelist;
	bf->ql.blink = poolset->freelist.ql.blink;
	poolset->freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}

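/*
 * Release the range [orig_buf, new_buf) from the front of an
 * allocated buffer so that new_buf becomes the start of the now
 * smaller allocation. The released range is merged into a preceding
 * free block if there is one, otherwise it becomes a free block of
 * its own. Used by raw_memalign() to drop the slack in front of the
 * aligned start address.
 */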
static void brel_before(char *orig_buf, char *new_buf, struct bpoolset *poolset)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to the head of what will become the new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b, poolset);
	}

#ifdef BufStats
	poolset->totalloc -= size;
	assert(poolset->totalloc >= 0);
#endif
}

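/*
 * The counterpart of brel_before(): shrink an allocated buffer to a
 * payload of `size' bytes and return the tail to the pool, either by
 * merging it into a following free block or by creating a new free
 * block, provided the tail is large enough to carry a free header.
 */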
static void brel_after(char *buf, bufsize size, struct bpoolset *poolset)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/* Set the new size of the buffer */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* Next buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn,
				  poolset);
	}

#ifdef BufStats
	poolset->totalloc -= free_size;
	assert(poolset->totalloc >= 0);
#endif
}

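/*
 * Aligned allocation on top of bget(), which only guarantees
 * SizeQuant alignment: over-allocate by alignment + SizeQ +
 * sizeof(struct bhead), pick the first aligned payload address far
 * enough into the buffer that the slack in front can hold a free
 * block, then release that slack with brel_before() and the unused
 * tail with brel_after(). For example, with alignment == 64 the
 * returned pointer lands on the first usable 64-byte boundary inside
 * the oversized buffer.
 */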
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
			  size_t size, struct malloc_ctx *ctx)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools(ctx);

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size, ctx);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s, &ctx->poolset);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * The returned buffer is not aligned as requested once
		 * hdr_size is added. Find an offset far enough into the
		 * buffer that what's in front of it can be freed.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p, &ctx->poolset);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size, &ctx->poolset);
out:
	raw_malloc_return_hook((void *)b, size, ctx);

	return (void *)b;
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/* Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

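/*
 * With ENABLE_MDBG each allocation is bracketed by guard data:
 *
 *   [ struct mdbg_hdr | payload (pl_size) | pad | footer magic ]
 *                      ^ pointer returned to the caller
 *
 * The header records the allocation site (file and line) and a magic
 * word; mdbg_get_footer() locates the 32-bit footer magic behind the
 * padded payload. free() and mdbg_check() assert that both magics
 * are intact, catching buffer over- and underruns.
 */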
struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

static void *gen_mdbg_malloc(struct malloc_ctx *ctx, const char *fname,
			     int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			 mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void gen_mdbg_free(struct malloc_ctx *ctx, void *ptr)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, ctx);
	}
}

void free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	gen_mdbg_free(&malloc_ctx, ptr);
	malloc_unlock(&malloc_ctx, exceptions);
}

static void *gen_mdbg_calloc(struct malloc_ctx *ctx, const char *fname, int lineno,
		      size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(nmemb * size), nmemb, size,
			  ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *gen_mdbg_realloc_unlocked(struct malloc_ctx *ctx, const char *fname,
				       int lineno, void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

static void *gen_mdbg_realloc(struct malloc_ctx *ctx, const char *fname,
			      int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(ctx);

	p = gen_mdbg_realloc_unlocked(ctx, fname, lineno, ptr, size);
	malloc_unlock(ctx, exceptions);
	return p;
}

#define realloc_unlocked(ctx, ptr, size)					\
		gen_mdbg_realloc_unlocked(ctx, __FILE__, __LINE__, (ptr), (size))

static void *gen_mdbg_memalign(struct malloc_ctx *ctx, const char *fname,
			       int lineno, size_t alignment, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock(ctx);

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, ctx);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(ctx, exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %" PRIu32 " bytes %s:%d",
			     hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	return gen_mdbg_malloc(&malloc_ctx, fname, lineno, size);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	return gen_mdbg_calloc(&malloc_ctx, fname, lineno, nmemb, size);
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	return gen_mdbg_realloc(&malloc_ctx, fname, lineno, ptr, size);
}

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		    size_t size)
{
	return gen_mdbg_memalign(&malloc_ctx, fname, lineno, alignment, size);
}

void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
#else

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_malloc(0, 0, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void free(void *ptr)
{
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	raw_free(ptr, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_calloc(0, 0, nmemb, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *realloc_unlocked(struct malloc_ctx *ctx, void *ptr,
			      size_t size)
{
	return raw_realloc(ptr, 0, 0, size, ctx);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = realloc_unlocked(&malloc_ctx, ptr, size);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock(&malloc_ctx);

	p = raw_memalign(0, 0, alignment, size, &malloc_ctx);
	malloc_unlock(&malloc_ctx, exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

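/*
 * Backing store is handed to the allocator with malloc_add_pool(),
 * typically once during boot. A minimal sketch, with a hypothetical
 * statically allocated buffer:
 *
 *   static uint8_t heap[64 * 1024];
 *
 *   malloc_add_pool(heap, sizeof(heap));
 *
 * The range is trimmed to SizeQuant alignment before being handed to
 * bpool(), so the edges of an unaligned buffer may go unused.
 */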
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock(ctx);

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
	malloc_unlock(ctx, exceptions);
}

static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);

	return ret;
}

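/*
 * Two half-open ranges [a, b) and [c, d) overlap exactly when
 * a < d && b > c; that is the test applied below between the buffer
 * and each registered pool.
 */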
static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t) buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	for (n = 0; n < ctx->pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)ctx->pool[n].buf;
		uintptr_t pool_end = pool_start + ctx->pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(ctx, exceptions);
	return ret;
}

void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}

bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}

bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}