xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision e84e1feccbdbd9deae5ad2dea921f4f624e8ad6d)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
102 #include <compiler.h>
103 #include <stdlib.h>
104 #include <stdint.h>
105 #include <stdbool.h>
106 #include <malloc.h>
107 #include <util.h>
108 #include <trace.h>
109 
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/thread.h>
114 #include <kernel/spinlock.h>
115 
116 static uint32_t malloc_lock(void)
117 {
118 	return cpu_spin_lock_xsave(&__malloc_spinlock);
119 }
120 
/* Release the heap spinlock and restore the saved exception mask. */
static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&__malloc_spinlock, exceptions);
}
125 
/* Mark [buf, buf + len) as freed heap memory for kernel ASAN. */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}
130 
/* Mark [buf, buf + len) as accessible (allocated) for kernel ASAN. */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}
135 
136 #else /*__KERNEL__*/
137 /* Compiling for TA */
/* TAs are single threaded from the heap's point of view: no locking. */
static uint32_t malloc_lock(void)
{
	return 0;
}
142 
/* No-op: matches the TA malloc_lock() stub. */
static void malloc_unlock(uint32_t exceptions __unused)
{
}
146 
/* No-op: ASAN tagging is only done when compiling for TEE Core. */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}
150 
/* No-op: ASAN tagging is only done when compiling for TEE Core. */
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}
154 #endif /*__KERNEL__*/
155 
156 #include "bget.c"		/* this is ugly, but this is bget */
157 
/* One memory region registered as heap via malloc_add_pool(). */
struct malloc_pool {
	void *buf;	/* start of the pool, SizeQuant aligned */
	size_t len;	/* pool length in bytes */
};
162 
163 static struct malloc_pool *malloc_pool;
164 static size_t malloc_pool_len;
165 
166 #ifdef BufStats
167 
168 static struct malloc_stats mstats;
169 
/*
 * Bookkeeping called on every raw_* allocation return.
 * Tracks the high-water mark of allocated bytes (totalloc is
 * maintained by bget.c) and records allocation failures.
 */
static void raw_malloc_return_hook(void *p, size_t requested_size)
{
	if (totalloc > mstats.max_allocated)
		mstats.max_allocated = totalloc;

	if (!p) {
		mstats.num_alloc_fail++;
		/* Remember the largest request that ever failed */
		if (requested_size > mstats.biggest_alloc_fail) {
			mstats.biggest_alloc_fail = requested_size;
			mstats.biggest_alloc_fail_used = totalloc;
		}
	}
}
183 
184 void malloc_reset_stats(void)
185 {
186 	unsigned int exceptions = malloc_lock();
187 
188 	mstats.max_allocated = 0;
189 	mstats.num_alloc_fail = 0;
190 	mstats.biggest_alloc_fail = 0;
191 	mstats.biggest_alloc_fail_used = 0;
192 	malloc_unlock(exceptions);
193 }
194 
195 void malloc_get_stats(struct malloc_stats *stats)
196 {
197 	uint32_t exceptions = malloc_lock();
198 
199 	memcpy(stats, &mstats, sizeof(*stats));
200 	stats->allocated = totalloc;
201 	malloc_unlock(exceptions);
202 }
203 
204 #else /* BufStats */
205 
/* Stub: statistics are only collected when BufStats is defined. */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
{
}
209 
210 #endif /* BufStats */
211 
212 #ifdef BufValid
213 static void raw_malloc_validate_pools(void)
214 {
215 	size_t n;
216 
217 	for (n = 0; n < malloc_pool_len; n++)
218 		bpoolv(malloc_pool[n].buf);
219 }
220 #else
/* Stub: pool validation is only available when BufValid is defined. */
static void raw_malloc_validate_pools(void)
{
}
224 #endif
225 
/* Iteration state for walking every buffer in every registered pool. */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* current index into malloc_pool[] */
};
230 
231 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
232 {
233 	iterator->pool_idx = 0;
234 	iterator->next_buf = BFH(malloc_pool[0].buf);
235 }
236 
/*
 * Visit one buffer in the current pool.
 *
 * Returns false when the pool's end sentinel (ESent) is reached.
 * Otherwise fills in the payload pointer/length, reports whether the
 * buffer is free, and advances the iterator to the next buffer.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	/* Sentinel block marks the end of this pool */
	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer: bget stores the size negated */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the plain buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
266 
267 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
268 {
269 	while (true) {
270 		size_t len;
271 		bool isfree;
272 
273 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
274 			if (isfree)
275 				continue;
276 			return true;
277 		}
278 
279 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
280 			return false;
281 
282 		iterator->pool_idx++;
283 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
284 	}
285 }
286 
287 /* Convenience macro for looping over all allocated buffers */
288 #define BPOOL_FOREACH(iterator, bp) \
289 		for (bpool_foreach_iterator_init((iterator)); \
290 			bpool_foreach((iterator), (bp));)
291 
292 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
293 {
294 	void *ptr = NULL;
295 	size_t s = hdr_size + ftr_size + pl_size;
296 
297 	/*
298 	 * Make sure that malloc has correct alignment of returned buffers.
299 	 * The assumption is that uintptr_t will be as wide as the largest
300 	 * required alignment of any type.
301 	 */
302 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
303 
304 	raw_malloc_validate_pools();
305 
306 	/* Check wrapping */
307 	if (s < pl_size)
308 		goto out;
309 
310 	/* BGET doesn't like 0 sized allocations */
311 	if (!s)
312 		s++;
313 
314 	ptr = bget(s);
315 out:
316 	raw_malloc_return_hook(ptr, pl_size);
317 
318 	return ptr;
319 }
320 
/* Return a buffer to bget; NULL is accepted and ignored. */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
328 
/*
 * Zero-initialized allocation of pl_nmemb * pl_size payload bytes
 * plus header/footer. Returns NULL on overflow or bget failure.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t pl_bytes = pl_nmemb * pl_size;	/* may wrap, checked below */
	size_t s = hdr_size + ftr_size + pl_bytes;
	void *ptr = NULL;

	raw_malloc_validate_pools();

	/*
	 * Check multiplication overflow. The previous test
	 * (s < pl_nmemb || s < pl_size) missed wrapped products that
	 * still ended up >= both factors, e.g. 65537 * 65536 on 32-bit.
	 */
	if (pl_size && pl_nmemb > SIZE_MAX / pl_size)
		goto out;

	/* Check wrapping of the header/footer additions */
	if (s < pl_bytes)
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
out:
	raw_malloc_return_hook(ptr, pl_bytes);

	return ptr;
}
351 
/*
 * Resize a bget buffer to hdr_size + pl_size + ftr_size bytes.
 * Returns NULL on wrapping or bgetr failure.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	size_t s = hdr_size + ftr_size + pl_size;
	void *p = NULL;

	/* Reject wrapped sizes */
	if (s >= pl_size) {
		raw_malloc_validate_pools();

		/* BGET doesn't like 0 sized allocations */
		p = bgetr(ptr, s ? s : 1);
	}

	raw_malloc_return_hook(p, pl_size);

	return p;
}
374 
/*
 * Turn @size bytes starting at @bf into a free block terminated by
 * the allocated block header @bn, and link it last on bget's free
 * list. Layout and link discipline follow bget.c's conventions.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer last on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
397 
/*
 * Release the head of allocated buffer @orig_buf so the allocation
 * instead starts at @new_buf. The freed head is either merged into a
 * preceding free block or becomes a new free block. Used by
 * raw_memalign() to align the returned pointer.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
447 
/*
 * Release the tail of allocated buffer @buf beyond @size payload
 * bytes. The size is rounded up exactly as bget() rounds a request;
 * if the remainder can't hold a free block header nothing is freed.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* Next buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
520 
/*
 * Allocate @size payload bytes such that the payload (after
 * @hdr_size header bytes) is aligned to @alignment, which must be a
 * power of two. Over-allocates, then gives back the unaligned head
 * (brel_before) and unused tail (brel_after).
 *
 * NOTE(review): the early NULL returns bypass raw_malloc_return_hook()
 * so those failures aren't counted in BufStats - confirm intentional.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		/* Leave room for the free block header in front */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
out:
	raw_malloc_return_hook((void *)b, size);

	return (void *)b;
}
587 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/* Return the usable payload size of an allocated bget buffer. */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;	/* allocated sizes are stored negated */
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
609 
610 #ifdef ENABLE_MDBG
611 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG
 * is set; a footer magic word follows the payload (see
 * mdbg_get_footer()).
 */
struct mdbg_hdr {
	const char *fname;	/* allocation site: file name */
	uint16_t line;		/* allocation site: line number */
	uint32_t pl_size;	/* payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	/*
	 * Padding so sizeof(struct mdbg_hdr) satisfies the alignment
	 * check in mdbg_malloc() (COMPILE_TIME_ASSERT there).
	 */
	uint64_t pad;
#endif
};
621 
622 #define MDBG_HEADER_MAGIC	0xadadadad
623 #define MDBG_FOOTER_MAGIC	0xecececec
624 
/*
 * Footer size for a payload of @pl_size bytes: padding up to the
 * next uint32_t boundary plus room for the footer magic word.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	return ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size +
	       sizeof(uint32_t);
}
631 
632 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
633 {
634 	uint32_t *footer;
635 
636 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
637 			      mdbg_get_ftr_size(hdr->pl_size));
638 	footer--;
639 	return footer;
640 }
641 
642 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
643 		int lineno, size_t pl_size)
644 {
645 	uint32_t *footer;
646 
647 	hdr->fname = fname;
648 	hdr->line = lineno;
649 	hdr->pl_size = pl_size;
650 	hdr->magic = MDBG_HEADER_MAGIC;
651 
652 	footer = mdbg_get_footer(hdr);
653 	*footer = MDBG_FOOTER_MAGIC;
654 }
655 
656 void *mdbg_malloc(const char *fname, int lineno, size_t size)
657 {
658 	struct mdbg_hdr *hdr;
659 	uint32_t exceptions = malloc_lock();
660 
661 	/*
662 	 * Check struct mdbg_hdr doesn't get bad alignment.
663 	 * This is required by C standard: the buffer returned from
664 	 * malloc() should be aligned with a fundamental alignment.
665 	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
666 	 */
667 	COMPILE_TIME_ASSERT(
668 		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
669 
670 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
671 			  mdbg_get_ftr_size(size), size);
672 	if (hdr) {
673 		mdbg_update_hdr(hdr, fname, lineno, size);
674 		hdr++;
675 	}
676 
677 	malloc_unlock(exceptions);
678 	return hdr;
679 }
680 
/* Verify both magic words of an allocation's debug header/footer. */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
686 
687 static void mdbg_free(void *ptr)
688 {
689 	struct mdbg_hdr *hdr = ptr;
690 
691 	if (hdr) {
692 		hdr--;
693 		assert_header(hdr);
694 		hdr->magic = 0;
695 		*mdbg_get_footer(hdr) = 0;
696 		raw_free(hdr);
697 	}
698 }
699 
/* Standard free() entry point with mdbg header validation. */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	mdbg_free(ptr);
	malloc_unlock(exceptions);
}
707 
708 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
709 {
710 	struct mdbg_hdr *hdr;
711 	uint32_t exceptions = malloc_lock();
712 
713 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
714 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
715 	if (hdr) {
716 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
717 		hdr++;
718 	}
719 	malloc_unlock(exceptions);
720 	return hdr;
721 }
722 
/*
 * Realloc without taking the malloc lock; the caller must hold it.
 * Validates the existing debug header (if any) and restamps the new
 * one. NOTE(review): failure semantics follow bgetr() in bget.c -
 * presumed to leave the old buffer intact; verify against bget.c.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}
740 
/* Locked wrapper around mdbg_realloc_unlocked(). */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	uint32_t exceptions = malloc_lock();
	void *p = mdbg_realloc_unlocked(fname, lineno, ptr, size);

	malloc_unlock(exceptions);
	return p;
}
750 
751 #define realloc_unlocked(ptr, size) \
752 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
753 
754 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
755 		size_t size)
756 {
757 	struct mdbg_hdr *hdr;
758 	uint32_t exceptions = malloc_lock();
759 
760 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
761 			   alignment, size);
762 	if (hdr) {
763 		mdbg_update_hdr(hdr, fname, lineno, size);
764 		hdr++;
765 	}
766 	malloc_unlock(exceptions);
767 	return hdr;
768 }
769 
770 
/*
 * Given a raw buffer from the pool iterator, return the start of the
 * user payload and its size as recorded in the debug header.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The underlying bget buffer must hold at least the payload */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
779 
/*
 * Walk every allocated buffer in every pool and verify its debug
 * header/footer magics. If @bufdump > 0 each allocation is also
 * logged with its origin file:line.
 */
void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(exceptions);
}
806 
807 #else
808 
/* Standard malloc() entry point: locked wrapper around raw_malloc(). */
void *malloc(size_t size)
{
	uint32_t exceptions = malloc_lock();
	void *p = raw_malloc(0, 0, size);

	malloc_unlock(exceptions);
	return p;
}
818 
/* Standard free() entry point: locked wrapper around raw_free(). */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	raw_free(ptr);
	malloc_unlock(exceptions);
}
826 
/* Standard calloc() entry point: locked wrapper around raw_calloc(). */
void *calloc(size_t nmemb, size_t size)
{
	uint32_t exceptions = malloc_lock();
	void *p = raw_calloc(0, 0, nmemb, size);

	malloc_unlock(exceptions);
	return p;
}
836 
/* Realloc without locking; the caller must hold the malloc lock. */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}
841 
/* Standard realloc() entry point: locked realloc_unlocked(). */
void *realloc(void *ptr, size_t size)
{
	uint32_t exceptions = malloc_lock();
	void *p = realloc_unlocked(ptr, size);

	malloc_unlock(exceptions);
	return p;
}
851 
/* Standard memalign() entry point: locked raw_memalign(). */
void *memalign(size_t alignment, size_t size)
{
	uint32_t exceptions = malloc_lock();
	void *p = raw_memalign(0, 0, alignment, size);

	malloc_unlock(exceptions);
	return p;
}
861 
/* Without mdbg the payload is the whole bget buffer. */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
867 
868 #endif
869 
/*
 * Register a chunk of memory as heap. The chunk is aligned to
 * SizeQuant, handed to bget's bpool() and recorded in malloc_pool[]
 * so the pool walkers can find it. Too small chunks are skipped.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest pool that is still useful after alignment */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	/*
	 * NOTE(review): a chunk smaller than SizeQuant can make
	 * start >= end here; callers are assumed to pass sane pools.
	 */
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();
	/* Mark the pool memory as free for ASAN before bget uses it */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}
906 
/*
 * Return true if [buf, buf + len) lies entirely within one single
 * allocated heap buffer.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		/* Fully contained in this allocation? */
		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(exceptions);

	return ret;
}
941 
942 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
943 {
944 	uintptr_t buf_start = (uintptr_t) buf;
945 	uintptr_t buf_end = buf_start + len;
946 	size_t n;
947 	bool ret = false;
948 	uint32_t exceptions = malloc_lock();
949 
950 	raw_malloc_validate_pools();
951 
952 	for (n = 0; n < malloc_pool_len; n++) {
953 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
954 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
955 
956 		if (buf_start > buf_end || pool_start > pool_end) {
957 			ret = true;	/* Wrapping buffers, shouldn't happen */
958 			goto out;
959 		}
960 
961 		if (buf_end > pool_start || buf_start < pool_end) {
962 			ret = true;
963 			goto out;
964 		}
965 	}
966 
967 out:
968 	malloc_unlock(exceptions);
969 	return ret;
970 }
971