xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 5acb1bc6e8ece254ffe7dbdc41605ad5613b6ab7)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
102 #include <compiler.h>
103 #include <stdlib.h>
104 #include <stdint.h>
105 #include <stdbool.h>
106 #include <malloc.h>
107 #include <util.h>
108 #include <trace.h>
109 
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/thread.h>
114 #include <kernel/spinlock.h>
115 
/*
 * Take the global heap lock.
 * Masks both native and foreign interrupts before spinning on the lock so
 * the heap cannot be re-entered from an interrupt handler on this core
 * while the lock is held.
 * Returns the previous exception mask, to be passed to malloc_unlock().
 */
static uint32_t malloc_lock(void)
{
	uint32_t exceptions;

	exceptions = thread_mask_exceptions(
			THREAD_EXCP_NATIVE_INTR | THREAD_EXCP_FOREIGN_INTR);
	cpu_spin_lock(&__malloc_spinlock);
	return exceptions;
}
125 
/*
 * Release the global heap lock and restore the exception mask previously
 * returned by malloc_lock().
 */
static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock(&__malloc_spinlock);
	thread_unmask_exceptions(exceptions);
}
131 
/* Tag the range [buf, buf + len) as freed heap memory for ASan */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}
136 
/* Tag the range [buf, buf + len) as accessible (allocated) for ASan */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}
141 
142 #else /*__KERNEL__*/
143 /* Compiling for TA */
/* TAs are single threaded: no lock needed, 0 is a dummy exception mask */
static uint32_t malloc_lock(void)
{
	return 0;
}
148 
/* No-op counterpart of the TA malloc_lock() stub */
static void malloc_unlock(uint32_t exceptions __unused)
{
}
152 
/* ASan tagging is only done when compiling for TEE core; no-op for TAs */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}
156 
/* ASan tagging is only done when compiling for TEE core; no-op for TAs */
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}
160 #endif /*__KERNEL__*/
161 
162 #include "bget.c"		/* this is ugly, but this is bget */
163 
/* One memory region registered with BGET via malloc_add_pool() */
struct malloc_pool {
	void *buf;	/* Start of the pool, SizeQuant aligned */
	size_t len;	/* Pool size in bytes */
};

/* Dynamically grown array of all registered pools, and its length */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;
171 
172 #ifdef BufStats
173 
static struct malloc_stats mstats;	/* Accumulated allocation statistics */
175 
176 static void raw_malloc_return_hook(void *p, size_t requested_size)
177 {
178 	if (totalloc > mstats.max_allocated)
179 		mstats.max_allocated = totalloc;
180 
181 	if (!p) {
182 		mstats.num_alloc_fail++;
183 		if (requested_size > mstats.biggest_alloc_fail) {
184 			mstats.biggest_alloc_fail = requested_size;
185 			mstats.biggest_alloc_fail_used = totalloc;
186 		}
187 	}
188 }
189 
190 void malloc_reset_stats(void)
191 {
192 	unsigned int exceptions = malloc_lock();
193 
194 	mstats.max_allocated = 0;
195 	mstats.num_alloc_fail = 0;
196 	mstats.biggest_alloc_fail = 0;
197 	mstats.biggest_alloc_fail_used = 0;
198 	malloc_unlock(exceptions);
199 }
200 
201 void malloc_get_stats(struct malloc_stats *stats)
202 {
203 	uint32_t exceptions = malloc_lock();
204 
205 	memcpy(stats, &mstats, sizeof(*stats));
206 	stats->allocated = totalloc;
207 	malloc_unlock(exceptions);
208 }
209 
210 #else /* BufStats */
211 
/* Statistics disabled (no BufStats): allocation results are not recorded */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
{
}
215 
216 #endif /* BufStats */
217 
218 #ifdef BufValid
/* Run BGET's bpoolv() consistency check on every registered pool */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
226 #else
/* Pool validation compiled out when BufValid is not defined */
static void raw_malloc_validate_pools(void)
{
}
230 #endif
231 
/* Cursor for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* Next buffer header to visit */
	size_t pool_idx;		/* Current index into malloc_pool[] */
};
236 
/*
 * Position the iterator at the first buffer of the first pool.
 * NOTE(review): dereferences malloc_pool[0], so this assumes at least one
 * pool has been registered — confirm all callers run after malloc_add_pool().
 */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
242 
/*
 * Step to the next buffer within the current pool.
 * Returns false when the pool's end sentinel (ESent) is reached, otherwise
 * reports the payload start (*buf), payload length (*len) and whether the
 * buffer is free (*isfree), then advances the iterator.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer: BGET stores the size negated */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* The payload follows the plain struct bhead header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
272 
/*
 * Advance to the next *allocated* buffer, skipping free buffers and
 * crossing pool boundaries as needed. Returns false when all pools have
 * been exhausted.
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;	/* Only report allocated buffers */
			return true;
		}

		/* Current pool exhausted, move on to the next one */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}
292 
293 /* Convenience macro for looping over all allocated buffers */
294 #define BPOOL_FOREACH(iterator, bp) \
295 		for (bpool_foreach_iterator_init((iterator)); \
296 			bpool_foreach((iterator), (bp));)
297 
298 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
299 {
300 	void *ptr = NULL;
301 	size_t s = hdr_size + ftr_size + pl_size;
302 
303 	/*
304 	 * Make sure that malloc has correct alignment of returned buffers.
305 	 * The assumption is that uintptr_t will be as wide as the largest
306 	 * required alignment of any type.
307 	 */
308 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
309 
310 	raw_malloc_validate_pools();
311 
312 	/* Check wrapping */
313 	if (s < pl_size)
314 		goto out;
315 
316 	/* BGET doesn't like 0 sized allocations */
317 	if (!s)
318 		s++;
319 
320 	ptr = bget(s);
321 out:
322 	raw_malloc_return_hook(ptr, pl_size);
323 
324 	return ptr;
325 }
326 
/* Core free: release @ptr back to BGET; NULL is a no-op */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
334 
335 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
336 		size_t pl_size)
337 {
338 	size_t s = hdr_size + ftr_size + pl_nmemb * pl_size;
339 	void *ptr = NULL;
340 
341 	raw_malloc_validate_pools();
342 
343 	/* Check wrapping */
344 	if (s < pl_nmemb || s < pl_size)
345 		goto out;
346 
347 	/* BGET doesn't like 0 sized allocations */
348 	if (!s)
349 		s++;
350 
351 	ptr = bgetz(s);
352 out:
353 	raw_malloc_return_hook(ptr, pl_nmemb * pl_size);
354 
355 	return ptr;
356 }
357 
358 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
359 		size_t pl_size)
360 {
361 	size_t s = hdr_size + ftr_size + pl_size;
362 	void *p = NULL;
363 
364 	/* Check wrapping */
365 	if (s < pl_size)
366 		goto out;
367 
368 	raw_malloc_validate_pools();
369 
370 	/* BGET doesn't like 0 sized allocations */
371 	if (!s)
372 		s++;
373 
374 	p = bgetr(ptr, s);
375 out:
376 	raw_malloc_return_hook(p, pl_size);
377 
378 	return p;
379 }
380 
/*
 * Turn the @size bytes at @bf into a free block and insert it on BGET's
 * free list. @bn is the allocated block immediately following the new
 * free block; its prevfree field is updated to point back at it.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
403 
/*
 * Release the range [orig_buf, new_buf) in front of an allocated BGET
 * buffer, so the allocation effectively starts at new_buf instead.
 * Used by raw_memalign() to discard alignment padding: the freed space
 * is either merged into a preceding free block or inserted as a new
 * free block on the free list.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
453 
/*
 * Shrink the allocated BGET buffer @buf down to @size payload bytes,
 * releasing the tail. Used by raw_memalign() to return the
 * over-allocation at the end of the buffer. Does nothing when the tail
 * is too small to hold a free block header.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
526 
/*
 * Allocate @size payload bytes such that the payload (which starts
 * @hdr_size bytes into the returned buffer) is aligned to @alignment,
 * a power of two. Over-allocates, then trims the excess at the front
 * (brel_before) and at the end (brel_after).
 * Returns NULL on invalid alignment, overflow or out of memory.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		/* Keep room for the free block header created in front */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
out:
	raw_malloc_return_hook((void *)b, size);

	return (void *)b;
}
593 
594 /* Most of the stuff in this function is copied from bgetr() in bget.c */
595 static __maybe_unused bufsize bget_buf_size(void *buf)
596 {
597 	bufsize osize;          /* Old size of buffer */
598 	struct bhead *b;
599 
600 	b = BH(((char *)buf) - sizeof(struct bhead));
601 	osize = -b->bsize;
602 #ifdef BECtl
603 	if (osize == 0) {
604 		/*  Buffer acquired directly through acqfcn. */
605 		struct bdhead *bd;
606 
607 		bd = BDH(((char *)buf) - sizeof(struct bdhead));
608 		osize = bd->tsize - sizeof(struct bdhead);
609 	} else
610 #endif
611 		osize -= sizeof(struct bhead);
612 	assert(osize > 0);
613 	return osize;
614 }
615 
616 #ifdef ENABLE_MDBG
617 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG is
 * set. Records where the allocation was made and brackets the payload
 * with magic values to catch underruns (header) and overruns (footer).
 */
struct mdbg_hdr {
	const char *fname;	/* File that made the allocation */
	uint16_t line;		/* Line number of the allocation */
	uint32_t pl_size;	/* Requested payload size */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	uint64_t pad;		/* Keeps sizeof(struct mdbg_hdr) a multiple of 16 */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
630 
/*
 * Size of the footer area for a payload of @pl_size bytes: padding up
 * to the next uint32_t boundary plus the 32-bit footer magic itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t pad = (sizeof(uint32_t) - pl_size % sizeof(uint32_t)) %
		     sizeof(uint32_t);

	return pad + sizeof(uint32_t);
}
637 
/* Return the address of the 32-bit footer magic behind hdr's payload */
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	/* The magic is the last uint32_t of the footer area */
	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}
647 
/* Fill in the debug header and write the footer guard magic */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
661 
/*
 * malloc() replacement used when ENABLE_MDBG is set: allocates extra
 * room for the debug header and footer, records fname:lineno, and
 * returns a pointer to the payload (just past the header).
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* Return the payload, not the header */
	}

	malloc_unlock(exceptions);
	return hdr;
}
686 
/* Assert that the buffer's guard magics are intact (no under/overrun) */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
692 
693 static void mdbg_free(void *ptr)
694 {
695 	struct mdbg_hdr *hdr = ptr;
696 
697 	if (hdr) {
698 		hdr--;
699 		assert_header(hdr);
700 		hdr->magic = 0;
701 		*mdbg_get_footer(hdr) = 0;
702 		raw_free(hdr);
703 	}
704 }
705 
/* Locked free() entry point when mdbg is enabled */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	mdbg_free(ptr);
	malloc_unlock(exceptions);
}
713 
714 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
715 {
716 	struct mdbg_hdr *hdr;
717 	uint32_t exceptions = malloc_lock();
718 
719 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
720 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
721 	if (hdr) {
722 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
723 		hdr++;
724 	}
725 	malloc_unlock(exceptions);
726 	return hdr;
727 }
728 
/*
 * realloc() core: validates the old debug header (if any), reallocates
 * with room for header and footer, and refreshes the guard magics.
 * Caller holds the malloc lock. Returns the payload pointer or NULL.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;	/* Step back from payload to the debug header */
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* Return the payload, not the header */
	}
	return hdr;
}
746 
747 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
748 {
749 	void *p;
750 	uint32_t exceptions = malloc_lock();
751 
752 	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
753 	malloc_unlock(exceptions);
754 	return p;
755 }
756 
757 #define realloc_unlocked(ptr, size) \
758 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
759 
/*
 * memalign() replacement recording fname:lineno in a debug header.
 * The payload (after the header) is aligned to @alignment.
 */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* Return the payload, not the header */
	}
	malloc_unlock(exceptions);
	return hdr;
}
775 
776 
/*
 * Given a raw BGET buffer holding an mdbg allocation, return the
 * payload start and store the requested payload size in *size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
785 
/*
 * Walk every allocated buffer asserting that its guard magics are
 * intact. If bufdump > 0, also log each buffer's size and origin.
 */
void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			/*
			 * NOTE(review): pl_size is uint32_t but printed
			 * with %d — confirm IMSG's format checking.
			 */
			IMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(exceptions);
}
812 
813 #else
814 
815 void *malloc(size_t size)
816 {
817 	void *p;
818 	uint32_t exceptions = malloc_lock();
819 
820 	p = raw_malloc(0, 0, size);
821 	malloc_unlock(exceptions);
822 	return p;
823 }
824 
/* Standard free(): release @ptr under the heap lock; NULL is a no-op */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	raw_free(ptr);
	malloc_unlock(exceptions);
}
832 
833 void *calloc(size_t nmemb, size_t size)
834 {
835 	void *p;
836 	uint32_t exceptions = malloc_lock();
837 
838 	p = raw_calloc(0, 0, nmemb, size);
839 	malloc_unlock(exceptions);
840 	return p;
841 }
842 
/* Lock-free realloc core, shared with malloc_add_pool() */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}
847 
848 void *realloc(void *ptr, size_t size)
849 {
850 	void *p;
851 	uint32_t exceptions = malloc_lock();
852 
853 	p = realloc_unlocked(ptr, size);
854 	malloc_unlock(exceptions);
855 	return p;
856 }
857 
858 void *memalign(size_t alignment, size_t size)
859 {
860 	void *p;
861 	uint32_t exceptions = malloc_lock();
862 
863 	p = raw_memalign(0, 0, alignment, size);
864 	malloc_unlock(exceptions);
865 	return p;
866 }
867 
/* Without mdbg headers the payload is the whole BGET buffer */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
873 
874 #endif
875 
/*
 * Register the memory region [buf, buf + len) as a heap pool.
 * The region is aligned to SizeQuant, handed to BGET's bpool(), and
 * recorded in the malloc_pool[] bookkeeping array (which is itself
 * allocated from the heap). Regions too small to be useful are skipped.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest pool that can hold the bookkeeping entry plus headers */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();
	/* Mark the new pool as freed memory for ASan before BGET claims it */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}
912 
/*
 * Return true if [buf, buf + len) lies entirely inside a single
 * currently-allocated heap buffer. Used to validate that pointers
 * handed in from the outside reference live heap data.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		/* The queried range must fit inside one allocation */
		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(exceptions);

	return ret;
}
947 
948 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
949 {
950 	uintptr_t buf_start = (uintptr_t) buf;
951 	uintptr_t buf_end = buf_start + len;
952 	size_t n;
953 	bool ret = false;
954 	uint32_t exceptions = malloc_lock();
955 
956 	raw_malloc_validate_pools();
957 
958 	for (n = 0; n < malloc_pool_len; n++) {
959 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
960 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
961 
962 		if (buf_start > buf_end || pool_start > pool_end) {
963 			ret = true;	/* Wrapping buffers, shouldn't happen */
964 			goto out;
965 		}
966 
967 		if (buf_end > pool_start || buf_start < pool_end) {
968 			ret = true;
969 			goto out;
970 		}
971 	}
972 
973 out:
974 	malloc_unlock(exceptions);
975 	return ret;
976 }
977