xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 8e81e2f5366a971afdd2ac47fb8529d1def5feb0)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
102 #include <compiler.h>
103 #include <malloc.h>
104 #include <stdbool.h>
105 #include <stdint.h>
106 #include <stdlib.h>
107 #include <string.h>
108 #include <trace.h>
109 #include <util.h>
110 
111 #if defined(__KERNEL__)
112 /* Compiling for TEE Core */
113 #include <kernel/asan.h>
114 #include <kernel/thread.h>
115 #include <kernel/spinlock.h>
116 
117 static uint32_t malloc_lock(void)
118 {
119 	return cpu_spin_lock_xsave(&__malloc_spinlock);
120 }
121 
/* Release the heap spinlock and restore the mask saved by malloc_lock() */
static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&__malloc_spinlock, exceptions);
}
126 
127 static void tag_asan_free(void *buf, size_t len)
128 {
129 	asan_tag_heap_free(buf, (uint8_t *)buf + len);
130 }
131 
132 static void tag_asan_alloced(void *buf, size_t len)
133 {
134 	asan_tag_access(buf, (uint8_t *)buf + len);
135 }
136 
137 static void *memset_unchecked(void *s, int c, size_t n)
138 {
139 	return asan_memset_unchecked(s, c, n);
140 }
141 
142 #else /*__KERNEL__*/
143 /* Compiling for TA */
/* No exception masking when compiling for a TA; returns a dummy mask */
static uint32_t malloc_lock(void)
{
	return 0;
}
148 
/* Counterpart of the no-op TA malloc_lock() */
static void malloc_unlock(uint32_t exceptions __unused)
{
}
152 
/* ASan tagging is only done in the kernel build; no-op for TAs */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}
156 
/* ASan tagging is only done in the kernel build; no-op for TAs */
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}
160 
/* Without ASan instrumentation this is a plain memset() */
static void *memset_unchecked(void *s, int c, size_t n)
{
	void *ret = memset(s, c, n);

	return ret;
}
165 
166 #endif /*__KERNEL__*/
167 
168 #include "bget.c"		/* this is ugly, but this is bget */
169 
170 struct malloc_pool {
171 	void *buf;
172 	size_t len;
173 };
174 
175 static struct malloc_pool *malloc_pool;
176 static size_t malloc_pool_len;
177 
178 #ifdef BufStats
179 
180 static struct malloc_stats mstats;
181 
182 static void raw_malloc_return_hook(void *p, size_t requested_size)
183 {
184 	if (totalloc > mstats.max_allocated)
185 		mstats.max_allocated = totalloc;
186 
187 	if (!p) {
188 		mstats.num_alloc_fail++;
189 		if (requested_size > mstats.biggest_alloc_fail) {
190 			mstats.biggest_alloc_fail = requested_size;
191 			mstats.biggest_alloc_fail_used = totalloc;
192 		}
193 	}
194 }
195 
196 void malloc_reset_stats(void)
197 {
198 	unsigned int exceptions = malloc_lock();
199 
200 	mstats.max_allocated = 0;
201 	mstats.num_alloc_fail = 0;
202 	mstats.biggest_alloc_fail = 0;
203 	mstats.biggest_alloc_fail_used = 0;
204 	malloc_unlock(exceptions);
205 }
206 
207 void malloc_get_stats(struct malloc_stats *stats)
208 {
209 	uint32_t exceptions = malloc_lock();
210 
211 	memcpy(stats, &mstats, sizeof(*stats));
212 	stats->allocated = totalloc;
213 	malloc_unlock(exceptions);
214 }
215 
216 #else /* BufStats */
217 
/* Statistics are compiled out when BufStats is not defined */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
{
}
221 
222 #endif /* BufStats */
223 
224 #ifdef BufValid
225 static void raw_malloc_validate_pools(void)
226 {
227 	size_t n;
228 
229 	for (n = 0; n < malloc_pool_len; n++)
230 		bpoolv(malloc_pool[n].buf);
231 }
232 #else
/* Pool validation is compiled out when BufValid is not defined */
static void raw_malloc_validate_pools(void)
{
}
236 #endif
237 
/* Cursor state for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next bget buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};
242 
243 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
244 {
245 	iterator->pool_idx = 0;
246 	iterator->next_buf = BFH(malloc_pool[0].buf);
247 }
248 
/*
 * Step the iterator one buffer forward inside the current pool.
 * Returns false when the pool's end sentinel (ESent) is reached.
 * Otherwise fills in the payload pointer/length and whether the buffer
 * is free, then advances the iterator to the next buffer.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	/* bsize == ESent marks the end of the pool */
	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the bget buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
278 
279 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
280 {
281 	while (true) {
282 		size_t len;
283 		bool isfree;
284 
285 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
286 			if (isfree)
287 				continue;
288 			return true;
289 		}
290 
291 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
292 			return false;
293 
294 		iterator->pool_idx++;
295 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
296 	}
297 }
298 
/*
 * Convenience macro for looping over all allocated buffers.
 * Free buffers are skipped by bpool_foreach(); (bp) receives the
 * payload pointer of each allocated buffer in turn.
 */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
303 
304 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
305 {
306 	void *ptr = NULL;
307 	size_t s = hdr_size + ftr_size + pl_size;
308 
309 	/*
310 	 * Make sure that malloc has correct alignment of returned buffers.
311 	 * The assumption is that uintptr_t will be as wide as the largest
312 	 * required alignment of any type.
313 	 */
314 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
315 
316 	raw_malloc_validate_pools();
317 
318 	/* Check wrapping */
319 	if (s < pl_size)
320 		goto out;
321 
322 	/* BGET doesn't like 0 sized allocations */
323 	if (!s)
324 		s++;
325 
326 	ptr = bget(s);
327 out:
328 	raw_malloc_return_hook(ptr, pl_size);
329 
330 	return ptr;
331 }
332 
/* Release a buffer obtained from one of the raw_*() allocators */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
340 
/*
 * Allocate a zeroed buffer of pl_nmemb * pl_size payload bytes plus
 * hdr_size bytes in front and ftr_size bytes after it.
 * Returns NULL on allocation failure or if any size computation wraps.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t pl = 0;
	size_t s = 0;
	void *ptr = NULL;

	raw_malloc_validate_pools();

	/*
	 * Check multiplication wrapping explicitly. The previous check
	 * (s < pl_nmemb || s < pl_size) missed overflows such as
	 * pl_nmemb = SIZE_MAX / 2 + 2 with pl_size = 2, where the wrapped
	 * product is still larger than both operands.
	 */
	if (pl_nmemb && pl_size > SIZE_MAX / pl_nmemb)
		goto out;
	pl = pl_nmemb * pl_size;

	/* Check addition wrapping, one step at a time */
	s = hdr_size + ftr_size;
	if (s < hdr_size)
		goto out;
	s += pl;
	if (s < pl)
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
out:
	/* On the overflow paths pl is 0 or the unwrapped payload size */
	raw_malloc_return_hook(ptr, pl);

	return ptr;
}
363 
/*
 * Resize a buffer to pl_size payload bytes plus hdr_size/ftr_size
 * around it. Returns NULL on failure or size wrap-around.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	void *p = NULL;
	size_t s = hdr_size + ftr_size + pl_size;

	if (s >= pl_size) {	/* the size computation didn't wrap */
		raw_malloc_validate_pools();

		/* BGET doesn't like 0 sized allocations */
		if (!s)
			s = 1;

		p = bgetr(ptr, s);
	}

	raw_malloc_return_hook(p, pl_size);

	return p;
}
386 
/*
 * Turn the range [bf, (char *)bf + size) into a free bget block and
 * insert it at the tail of the free list. bn is the allocated block
 * immediately following the new free block; its prevfree field is
 * updated to point back at it.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
409 
/*
 * Free the leading part [orig_buf, new_buf) of an allocated bget buffer
 * so that new_buf becomes the start of a smaller allocated buffer.
 * Used by raw_memalign() to discard the unaligned front of an
 * over-allocated buffer. The freed part is either merged into the
 * preceding free block or inserted as a new free block.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
459 
/*
 * Shrink the allocated buffer starting at buf down to size payload
 * bytes and release the tail back to the allocator, merging it with a
 * following free block when there is one. The tail is only released
 * when it is large enough to hold a free block header.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
532 
/*
 * Allocate size payload bytes such that the address hdr_size bytes into
 * the returned buffer is aligned to 'alignment' (a power of two).
 * Over-allocates with bget(), then trims the unaligned front with
 * brel_before() and the surplus tail with brel_after().
 *
 * NOTE(review): the early-return paths (non-power-of-two alignment and
 * size wrap-around) skip raw_malloc_return_hook(), so those failures
 * are not counted in the allocation statistics — confirm intended.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
out:
	raw_malloc_return_hook((void *)b, size);

	return (void *)b;
}
599 
/*
 * Return the usable payload size of a buffer allocated by bget().
 * Most of the stuff in this function is copied from bgetr() in bget.c
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
621 
622 #ifdef ENABLE_MDBG
623 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG is
 * set. Records where the allocation was made and carries guard words
 * (magic here, MDBG_FOOTER_MAGIC after the payload) used by
 * assert_header() to detect overruns and double frees.
 */
struct mdbg_hdr {
	const char *fname;	/* __FILE__ of the allocation site */
	uint16_t line;		/* __LINE__ of the allocation site */
	uint32_t pl_size;	/* requested payload size in bytes */
	uint32_t magic;		/* MDBG_HEADER_MAGIC, zeroed on free */
#if defined(ARM64)
	uint64_t pad;		/* keep sizeof a multiple of the 16-byte
				   alignment checked in mdbg_malloc() */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
636 
/*
 * Size of the debug footer: padding up to the next uint32_t boundary
 * plus one uint32_t for the footer magic.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	return ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size +
	       sizeof(uint32_t);
}
643 
644 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
645 {
646 	uint32_t *footer;
647 
648 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
649 			      mdbg_get_ftr_size(hdr->pl_size));
650 	footer--;
651 	return footer;
652 }
653 
654 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
655 		int lineno, size_t pl_size)
656 {
657 	uint32_t *footer;
658 
659 	hdr->fname = fname;
660 	hdr->line = lineno;
661 	hdr->pl_size = pl_size;
662 	hdr->magic = MDBG_HEADER_MAGIC;
663 
664 	footer = mdbg_get_footer(hdr);
665 	*footer = MDBG_FOOTER_MAGIC;
666 }
667 
668 void *mdbg_malloc(const char *fname, int lineno, size_t size)
669 {
670 	struct mdbg_hdr *hdr;
671 	uint32_t exceptions = malloc_lock();
672 
673 	/*
674 	 * Check struct mdbg_hdr doesn't get bad alignment.
675 	 * This is required by C standard: the buffer returned from
676 	 * malloc() should be aligned with a fundamental alignment.
677 	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
678 	 */
679 	COMPILE_TIME_ASSERT(
680 		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
681 
682 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
683 			  mdbg_get_ftr_size(size), size);
684 	if (hdr) {
685 		mdbg_update_hdr(hdr, fname, lineno, size);
686 		hdr++;
687 	}
688 
689 	malloc_unlock(exceptions);
690 	return hdr;
691 }
692 
/* Assert that both guard words of an allocation are intact */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
698 
699 static void mdbg_free(void *ptr)
700 {
701 	struct mdbg_hdr *hdr = ptr;
702 
703 	if (hdr) {
704 		hdr--;
705 		assert_header(hdr);
706 		hdr->magic = 0;
707 		*mdbg_get_footer(hdr) = 0;
708 		raw_free(hdr);
709 	}
710 }
711 
712 void free(void *ptr)
713 {
714 	uint32_t exceptions = malloc_lock();
715 
716 	mdbg_free(ptr);
717 	malloc_unlock(exceptions);
718 }
719 
720 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
721 {
722 	struct mdbg_hdr *hdr;
723 	uint32_t exceptions = malloc_lock();
724 
725 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
726 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
727 	if (hdr) {
728 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
729 		hdr++;
730 	}
731 	malloc_unlock(exceptions);
732 	return hdr;
733 }
734 
735 static void *mdbg_realloc_unlocked(const char *fname, int lineno,
736 			    void *ptr, size_t size)
737 {
738 	struct mdbg_hdr *hdr = ptr;
739 
740 	if (hdr) {
741 		hdr--;
742 		assert_header(hdr);
743 	}
744 	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
745 			   mdbg_get_ftr_size(size), size);
746 	if (hdr) {
747 		mdbg_update_hdr(hdr, fname, lineno, size);
748 		hdr++;
749 	}
750 	return hdr;
751 }
752 
753 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
754 {
755 	void *p;
756 	uint32_t exceptions = malloc_lock();
757 
758 	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
759 	malloc_unlock(exceptions);
760 	return p;
761 }
762 
/* In-file realloc without taking the lock, tagged with caller location */
#define realloc_unlocked(ptr, size) \
		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
765 
766 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
767 		size_t size)
768 {
769 	struct mdbg_hdr *hdr;
770 	uint32_t exceptions = malloc_lock();
771 
772 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
773 			   alignment, size);
774 	if (hdr) {
775 		mdbg_update_hdr(hdr, fname, lineno, size);
776 		hdr++;
777 	}
778 	malloc_unlock(exceptions);
779 	return hdr;
780 }
781 
782 
783 static void *get_payload_start_size(void *raw_buf, size_t *size)
784 {
785 	struct mdbg_hdr *hdr = raw_buf;
786 
787 	assert(bget_buf_size(hdr) >= hdr->pl_size);
788 	*size = hdr->pl_size;
789 	return hdr + 1;
790 }
791 
792 void mdbg_check(int bufdump)
793 {
794 	struct bpool_iterator itr;
795 	void *b;
796 	uint32_t exceptions = malloc_lock();
797 
798 	raw_malloc_validate_pools();
799 
800 	BPOOL_FOREACH(&itr, &b) {
801 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
802 
803 		assert_header(hdr);
804 
805 		if (bufdump > 0) {
806 			const char *fname = hdr->fname;
807 
808 			if (!fname)
809 				fname = "unknown";
810 
811 			IMSG("buffer: %d bytes %s:%d\n",
812 				hdr->pl_size, fname, hdr->line);
813 		}
814 	}
815 
816 	malloc_unlock(exceptions);
817 }
818 
819 #else
820 
821 void *malloc(size_t size)
822 {
823 	void *p;
824 	uint32_t exceptions = malloc_lock();
825 
826 	p = raw_malloc(0, 0, size);
827 	malloc_unlock(exceptions);
828 	return p;
829 }
830 
831 void free(void *ptr)
832 {
833 	uint32_t exceptions = malloc_lock();
834 
835 	raw_free(ptr);
836 	malloc_unlock(exceptions);
837 }
838 
839 void *calloc(size_t nmemb, size_t size)
840 {
841 	void *p;
842 	uint32_t exceptions = malloc_lock();
843 
844 	p = raw_calloc(0, 0, nmemb, size);
845 	malloc_unlock(exceptions);
846 	return p;
847 }
848 
849 static void *realloc_unlocked(void *ptr, size_t size)
850 {
851 	return raw_realloc(ptr, 0, 0, size);
852 }
853 
854 void *realloc(void *ptr, size_t size)
855 {
856 	void *p;
857 	uint32_t exceptions = malloc_lock();
858 
859 	p = realloc_unlocked(ptr, size);
860 	malloc_unlock(exceptions);
861 	return p;
862 }
863 
864 void *memalign(size_t alignment, size_t size)
865 {
866 	void *p;
867 	uint32_t exceptions = malloc_lock();
868 
869 	p = raw_memalign(0, 0, alignment, size);
870 	malloc_unlock(exceptions);
871 	return p;
872 }
873 
/* Without MDBG the payload is the entire bget buffer */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
879 
880 #endif
881 
/*
 * Register [buf, buf + len) as a heap pool. The range is trimmed to
 * SizeQuant alignment, handed to bget via bpool(), and recorded in the
 * malloc_pool array (which itself is allocated from the heap). Pools
 * too small to be useful are skipped with a debug message.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/*
	 * Smallest useful pool: room for one malloc_pool record (rounded
	 * up to SizeQuant) plus two bget block headers.
	 */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();
	/* Tag the new pool as freed heap memory for ASan */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	/* Grow the pool registry; allocation comes from the heap itself */
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}
918 
919 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
920 {
921 	struct bpool_iterator itr;
922 	void *b;
923 	uint8_t *start_buf = buf;
924 	uint8_t *end_buf = start_buf + len;
925 	bool ret = false;
926 	uint32_t exceptions = malloc_lock();
927 
928 	raw_malloc_validate_pools();
929 
930 	/* Check for wrapping */
931 	if (start_buf > end_buf)
932 		goto out;
933 
934 	BPOOL_FOREACH(&itr, &b) {
935 		uint8_t *start_b;
936 		uint8_t *end_b;
937 		size_t s;
938 
939 		start_b = get_payload_start_size(b, &s);
940 		end_b = start_b + s;
941 
942 		if (start_buf >= start_b && end_buf <= end_b) {
943 			ret = true;
944 			goto out;
945 		}
946 	}
947 
948 out:
949 	malloc_unlock(exceptions);
950 
951 	return ret;
952 }
953 
954 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
955 {
956 	uintptr_t buf_start = (uintptr_t) buf;
957 	uintptr_t buf_end = buf_start + len;
958 	size_t n;
959 	bool ret = false;
960 	uint32_t exceptions = malloc_lock();
961 
962 	raw_malloc_validate_pools();
963 
964 	for (n = 0; n < malloc_pool_len; n++) {
965 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
966 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
967 
968 		if (buf_start > buf_end || pool_start > pool_end) {
969 			ret = true;	/* Wrapping buffers, shouldn't happen */
970 			goto out;
971 		}
972 
973 		if (buf_end > pool_start || buf_start < pool_end) {
974 			ret = true;
975 			goto out;
976 		}
977 	}
978 
979 out:
980 	malloc_unlock(exceptions);
981 	return ret;
982 }
983