xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 1bb929836182ecb96d2d9d268daa807c67596396)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #define PROTOTYPES
30 
31 /*
32  *  BGET CONFIGURATION
33  *  ==================
34  */
35 /* #define BGET_ENABLE_ALL_OPTIONS */
36 #ifdef BGET_ENABLE_OPTION
37 #define TestProg    20000	/* Generate built-in test program
38 				   if defined.  The value specifies
39 				   how many buffer allocation attempts
40 				   the test program should make. */
41 #endif
42 
43 
44 #ifdef __LP64__
45 #define SizeQuant   16
46 #endif
47 #ifdef __ILP32__
48 #define SizeQuant   8
49 #endif
50 				/* Buffer allocation size quantum:
51 				   all buffers allocated are a
52 				   multiple of this size.  This
53 				   MUST be a power of two. */
54 
55 #ifdef BGET_ENABLE_OPTION
56 #define BufDump     1		/* Define this symbol to enable the
57 				   bpoold() function which dumps the
58 				   buffers in a buffer pool. */
59 
60 #define BufValid    1		/* Define this symbol to enable the
61 				   bpoolv() function for validating
62 				   a buffer pool. */
63 
64 #define DumpData    1		/* Define this symbol to enable the
65 				   bufdump() function which allows
66 				   dumping the contents of an allocated
67 				   or free buffer. */
68 
69 #define BufStats    1		/* Define this symbol to enable the
70 				   bstats() function which calculates
71 				   the total free space in the buffer
72 				   pool, the largest available
73 				   buffer, and the total space
74 				   currently allocated. */
75 
76 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
77 				   pattern of garbage to trip up
78 				   miscreants who attempt to use
79 				   pointers into released buffers. */
80 
81 #define BestFit     1		/* Use a best fit algorithm when
82 				   searching for space for an
83 				   allocation request.  This uses
84 				   memory more efficiently, but
85 				   allocation will be much slower. */
86 
87 #define BECtl       1		/* Define this symbol to enable the
88 				   bectl() function for automatic
89 				   pool space control.  */
90 #endif
91 
92 #ifdef MEM_DEBUG
93 #undef NDEBUG
94 #define DumpData    1
95 #define BufValid    1
96 #define FreeWipe    1
97 #endif
98 
99 #ifdef CFG_WITH_STATS
100 #define BufStats    1
101 #endif
102 
103 #include <compiler.h>
104 #include <malloc.h>
105 #include <stdbool.h>
106 #include <stdint.h>
107 #include <stdlib.h>
108 #include <string.h>
109 #include <trace.h>
110 #include <util.h>
111 
#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>

/* Serialize heap operations; returns the previous exception state */
static uint32_t malloc_lock(void)
{
	return cpu_spin_lock_xsave(&__malloc_spinlock);
}

/* Release the heap lock and restore the saved exception state */
static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&__malloc_spinlock, exceptions);
}

/* Tag [buf, buf + len) as freed heap memory for ASan */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

/* Tag [buf, buf + len) as accessible (allocated) for ASan */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

/* memset() variant that bypasses ASan access checks */
static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

#else /*__KERNEL__*/
/* Compiling for TA: no locking or ASan tagging, shims are no-ops */
static uint32_t malloc_lock(void)
{
	return 0;
}

static void malloc_unlock(uint32_t exceptions __unused)
{
}

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#endif /*__KERNEL__*/
168 
169 #include "bget.c"		/* this is ugly, but this is bget */
170 
171 struct malloc_pool {
172 	void *buf;
173 	size_t len;
174 };
175 
176 static struct malloc_pool *malloc_pool;
177 static size_t malloc_pool_len;
178 
179 #ifdef BufStats
180 
181 static struct malloc_stats mstats;
182 
/*
 * Called at the end of every raw_* allocation path with the resulting
 * pointer (NULL on failure) and the caller-requested payload size.
 * Maintains the allocation high-water mark (totalloc is bget's running
 * total of allocated bytes) and failure statistics.
 */
static void raw_malloc_return_hook(void *p, size_t requested_size)
{
	if (totalloc > mstats.max_allocated)
		mstats.max_allocated = totalloc;

	if (!p) {
		mstats.num_alloc_fail++;
		/* Record the largest failed request and heap usage at that time */
		if (requested_size > mstats.biggest_alloc_fail) {
			mstats.biggest_alloc_fail = requested_size;
			mstats.biggest_alloc_fail_used = totalloc;
		}
	}
}
196 
197 void malloc_reset_stats(void)
198 {
199 	uint32_t exceptions = malloc_lock();
200 
201 	mstats.max_allocated = 0;
202 	mstats.num_alloc_fail = 0;
203 	mstats.biggest_alloc_fail = 0;
204 	mstats.biggest_alloc_fail_used = 0;
205 	malloc_unlock(exceptions);
206 }
207 
208 void malloc_get_stats(struct malloc_stats *stats)
209 {
210 	uint32_t exceptions = malloc_lock();
211 
212 	memcpy(stats, &mstats, sizeof(*stats));
213 	stats->allocated = totalloc;
214 	malloc_unlock(exceptions);
215 }
216 
217 #else /* BufStats */
218 
/* Statistics (BufStats) compiled out: hook is a no-op */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
{
}
222 
223 #endif /* BufStats */
224 
#ifdef BufValid
/* Run bget's consistency check (bpoolv) on every registered pool */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
/* Pool validation compiled out: no-op */
static void raw_malloc_validate_pools(void)
{
}
#endif
238 
/* Cursor for walking every buffer (free and allocated) in all pools */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next bget buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};

/* Position the iterator at the first buffer of the first pool */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
249 
/*
 * Visit the next buffer within the current pool. Returns false when
 * the pool's end sentinel (ESent) is reached; otherwise reports the
 * buffer payload via *buf/*len, sets *isfree, and advances the
 * iterator past the buffer. bget stores allocated sizes negated.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the plain bget header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
279 
/*
 * Advance to the next *allocated* buffer across all pools, skipping
 * free buffers. Returns false when every pool has been exhausted.
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		/* Current pool exhausted, step to the next one (if any) */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
304 
305 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
306 {
307 	void *ptr = NULL;
308 	size_t s = hdr_size + ftr_size + pl_size;
309 
310 	/*
311 	 * Make sure that malloc has correct alignment of returned buffers.
312 	 * The assumption is that uintptr_t will be as wide as the largest
313 	 * required alignment of any type.
314 	 */
315 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
316 
317 	raw_malloc_validate_pools();
318 
319 	/* Check wrapping */
320 	if (s < pl_size)
321 		goto out;
322 
323 	/* BGET doesn't like 0 sized allocations */
324 	if (!s)
325 		s++;
326 
327 	ptr = bget(s);
328 out:
329 	raw_malloc_return_hook(ptr, pl_size);
330 
331 	return ptr;
332 }
333 
/* Release a buffer obtained from raw_malloc(); NULL is a no-op */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
341 
/*
 * Allocate a zeroed buffer of pl_nmemb * pl_size payload bytes plus
 * caller bookkeeping (hdr_size, ftr_size). Returns NULL on failure.
 *
 * Fix: the previous wrap check (s < pl_nmemb || s < pl_size) did not
 * catch multiplication overflow, e.g. on 32-bit pl_nmemb=0x10001,
 * pl_size=0x10000 wraps to 0x10000 yet s ends up larger than both
 * factors, yielding an undersized allocation. Check the product
 * explicitly against SIZE_MAX before using it.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t pl = pl_nmemb * pl_size;
	size_t s = hdr_size + ftr_size + pl;
	void *ptr = NULL;

	raw_malloc_validate_pools();

	/* Check multiplication overflow */
	if (pl_nmemb && pl_size > SIZE_MAX / pl_nmemb)
		goto out;

	/* Check additive wrapping */
	if (s < pl)
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
out:
	raw_malloc_return_hook(ptr, pl);

	return ptr;
}
364 
365 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
366 		size_t pl_size)
367 {
368 	size_t s = hdr_size + ftr_size + pl_size;
369 	void *p = NULL;
370 
371 	/* Check wrapping */
372 	if (s < pl_size)
373 		goto out;
374 
375 	raw_malloc_validate_pools();
376 
377 	/* BGET doesn't like 0 sized allocations */
378 	if (!s)
379 		s++;
380 
381 	p = bgetr(ptr, s);
382 out:
383 	raw_malloc_return_hook(p, pl_size);
384 
385 	return p;
386 }
387 
/*
 * Turn the range [bf, bf + size) into a free bget buffer and insert it
 * at the tail of the free list. bn must be the allocated buffer that
 * immediately follows the new free block.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
410 
/*
 * Free the front part [orig_buf, new_buf) of the allocated bget buffer
 * starting at orig_buf so the allocation effectively begins at new_buf.
 * Used by raw_memalign() to align the returned pointer. The released
 * space is merged into a preceding free buffer when one exists,
 * otherwise it becomes a new free-list entry.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
460 
/*
 * Shrink the allocated bget buffer starting at buf so that only `size`
 * payload bytes (rounded up the same way bget() rounds) stay
 * allocated, releasing the tail. Used by raw_memalign() to give back
 * surplus space. The tail is merged with a following free buffer when
 * possible, otherwise it becomes a new free-list entry.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
533 
/*
 * Allocate size payload bytes (plus caller hdr/ftr bookkeeping) such
 * that the address at offset hdr_size into the returned buffer is
 * `alignment` aligned. Over-allocates, then trims the front with
 * brel_before() and the tail with brel_after(). alignment must be a
 * power of two. Returns NULL on failure.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
out:
	raw_malloc_return_hook((void *)b, size);

	return (void *)b;
}
600 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Return the usable payload size of the bget buffer behind buf, i.e.
 * the allocated size minus the bget header.
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
622 
623 #ifdef ENABLE_MDBG
624 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG is
 * set. Records the allocation site for leak reporting and carries a
 * magic word; a matching footer magic follows the payload, so both
 * buffer under- and overruns are detected by assert_header().
 */
struct mdbg_hdr {
	const char *fname;	/* source file of the allocation */
	uint16_t line;		/* source line of the allocation */
	uint32_t pl_size;	/* requested payload size in bytes */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	/* Padding to keep the header size alignment-compatible (see
	 * the COMPILE_TIME_ASSERT in mdbg_malloc()) */
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
637 
638 static size_t mdbg_get_ftr_size(size_t pl_size)
639 {
640 	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;
641 
642 	return ftr_pad + sizeof(uint32_t);
643 }
644 
645 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
646 {
647 	uint32_t *footer;
648 
649 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
650 			      mdbg_get_ftr_size(hdr->pl_size));
651 	footer--;
652 	return footer;
653 }
654 
655 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
656 		int lineno, size_t pl_size)
657 {
658 	uint32_t *footer;
659 
660 	hdr->fname = fname;
661 	hdr->line = lineno;
662 	hdr->pl_size = pl_size;
663 	hdr->magic = MDBG_HEADER_MAGIC;
664 
665 	footer = mdbg_get_footer(hdr);
666 	*footer = MDBG_FOOTER_MAGIC;
667 }
668 
669 void *mdbg_malloc(const char *fname, int lineno, size_t size)
670 {
671 	struct mdbg_hdr *hdr;
672 	uint32_t exceptions = malloc_lock();
673 
674 	/*
675 	 * Check struct mdbg_hdr doesn't get bad alignment.
676 	 * This is required by C standard: the buffer returned from
677 	 * malloc() should be aligned with a fundamental alignment.
678 	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
679 	 */
680 	COMPILE_TIME_ASSERT(
681 		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
682 
683 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
684 			  mdbg_get_ftr_size(size), size);
685 	if (hdr) {
686 		mdbg_update_hdr(hdr, fname, lineno, size);
687 		hdr++;
688 	}
689 
690 	malloc_unlock(exceptions);
691 	return hdr;
692 }
693 
/* Assert that neither guard word of this allocation is corrupted */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
699 
700 static void mdbg_free(void *ptr)
701 {
702 	struct mdbg_hdr *hdr = ptr;
703 
704 	if (hdr) {
705 		hdr--;
706 		assert_header(hdr);
707 		hdr->magic = 0;
708 		*mdbg_get_footer(hdr) = 0;
709 		raw_free(hdr);
710 	}
711 }
712 
713 void free(void *ptr)
714 {
715 	uint32_t exceptions = malloc_lock();
716 
717 	mdbg_free(ptr);
718 	malloc_unlock(exceptions);
719 }
720 
721 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
722 {
723 	struct mdbg_hdr *hdr;
724 	uint32_t exceptions = malloc_lock();
725 
726 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
727 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
728 	if (hdr) {
729 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
730 		hdr++;
731 	}
732 	malloc_unlock(exceptions);
733 	return hdr;
734 }
735 
736 static void *mdbg_realloc_unlocked(const char *fname, int lineno,
737 			    void *ptr, size_t size)
738 {
739 	struct mdbg_hdr *hdr = ptr;
740 
741 	if (hdr) {
742 		hdr--;
743 		assert_header(hdr);
744 	}
745 	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
746 			   mdbg_get_ftr_size(size), size);
747 	if (hdr) {
748 		mdbg_update_hdr(hdr, fname, lineno, size);
749 		hdr++;
750 	}
751 	return hdr;
752 }
753 
754 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
755 {
756 	void *p;
757 	uint32_t exceptions = malloc_lock();
758 
759 	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
760 	malloc_unlock(exceptions);
761 	return p;
762 }
763 
764 #define realloc_unlocked(ptr, size) \
765 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
766 
767 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
768 		size_t size)
769 {
770 	struct mdbg_hdr *hdr;
771 	uint32_t exceptions = malloc_lock();
772 
773 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
774 			   alignment, size);
775 	if (hdr) {
776 		mdbg_update_hdr(hdr, fname, lineno, size);
777 		hdr++;
778 	}
779 	malloc_unlock(exceptions);
780 	return hdr;
781 }
782 
783 
784 static void *get_payload_start_size(void *raw_buf, size_t *size)
785 {
786 	struct mdbg_hdr *hdr = raw_buf;
787 
788 	assert(bget_buf_size(hdr) >= hdr->pl_size);
789 	*size = hdr->pl_size;
790 	return hdr + 1;
791 }
792 
793 void mdbg_check(int bufdump)
794 {
795 	struct bpool_iterator itr;
796 	void *b;
797 	uint32_t exceptions = malloc_lock();
798 
799 	raw_malloc_validate_pools();
800 
801 	BPOOL_FOREACH(&itr, &b) {
802 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
803 
804 		assert_header(hdr);
805 
806 		if (bufdump > 0) {
807 			const char *fname = hdr->fname;
808 
809 			if (!fname)
810 				fname = "unknown";
811 
812 			IMSG("buffer: %d bytes %s:%d\n",
813 				hdr->pl_size, fname, hdr->line);
814 		}
815 	}
816 
817 	malloc_unlock(exceptions);
818 }
819 
820 #else
821 
822 void *malloc(size_t size)
823 {
824 	void *p;
825 	uint32_t exceptions = malloc_lock();
826 
827 	p = raw_malloc(0, 0, size);
828 	malloc_unlock(exceptions);
829 	return p;
830 }
831 
832 void free(void *ptr)
833 {
834 	uint32_t exceptions = malloc_lock();
835 
836 	raw_free(ptr);
837 	malloc_unlock(exceptions);
838 }
839 
840 void *calloc(size_t nmemb, size_t size)
841 {
842 	void *p;
843 	uint32_t exceptions = malloc_lock();
844 
845 	p = raw_calloc(0, 0, nmemb, size);
846 	malloc_unlock(exceptions);
847 	return p;
848 }
849 
850 static void *realloc_unlocked(void *ptr, size_t size)
851 {
852 	return raw_realloc(ptr, 0, 0, size);
853 }
854 
855 void *realloc(void *ptr, size_t size)
856 {
857 	void *p;
858 	uint32_t exceptions = malloc_lock();
859 
860 	p = realloc_unlocked(ptr, size);
861 	malloc_unlock(exceptions);
862 	return p;
863 }
864 
865 void *memalign(size_t alignment, size_t size)
866 {
867 	void *p;
868 	uint32_t exceptions = malloc_lock();
869 
870 	p = raw_memalign(0, 0, alignment, size);
871 	malloc_unlock(exceptions);
872 	return p;
873 }
874 
875 static void *get_payload_start_size(void *ptr, size_t *size)
876 {
877 	*size = bget_buf_size(ptr);
878 	return ptr;
879 }
880 
881 #endif
882 
/*
 * Register the memory range [buf, buf + len) as heap. The range is
 * trimmed to SizeQuant alignment, handed to bget via bpool() and
 * recorded in malloc_pool[] (which itself grows via realloc_unlocked(),
 * i.e. is allocated from the heap being managed).
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest pool worth registering: a bookkeeping entry rounded
	 * up to SizeQuant plus two bget headers */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();
	/* Tag the whole range as freed heap memory for ASan */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}
919 
920 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
921 {
922 	struct bpool_iterator itr;
923 	void *b;
924 	uint8_t *start_buf = buf;
925 	uint8_t *end_buf = start_buf + len;
926 	bool ret = false;
927 	uint32_t exceptions = malloc_lock();
928 
929 	raw_malloc_validate_pools();
930 
931 	/* Check for wrapping */
932 	if (start_buf > end_buf)
933 		goto out;
934 
935 	BPOOL_FOREACH(&itr, &b) {
936 		uint8_t *start_b;
937 		uint8_t *end_b;
938 		size_t s;
939 
940 		start_b = get_payload_start_size(b, &s);
941 		end_b = start_b + s;
942 
943 		if (start_buf >= start_b && end_buf <= end_b) {
944 			ret = true;
945 			goto out;
946 		}
947 	}
948 
949 out:
950 	malloc_unlock(exceptions);
951 
952 	return ret;
953 }
954 
955 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
956 {
957 	uintptr_t buf_start = (uintptr_t) buf;
958 	uintptr_t buf_end = buf_start + len;
959 	size_t n;
960 	bool ret = false;
961 	uint32_t exceptions = malloc_lock();
962 
963 	raw_malloc_validate_pools();
964 
965 	for (n = 0; n < malloc_pool_len; n++) {
966 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
967 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
968 
969 		if (buf_start > buf_end || pool_start > pool_end) {
970 			ret = true;	/* Wrapping buffers, shouldn't happen */
971 			goto out;
972 		}
973 
974 		if (buf_end > pool_start || buf_start < pool_end) {
975 			ret = true;
976 			goto out;
977 		}
978 	}
979 
980 out:
981 	malloc_unlock(exceptions);
982 	return ret;
983 }
984