xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 82c9f5974071a8d8f64af8e8ec7e0e45c1d3b472)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
102 #include <compiler.h>
103 #include <stdlib.h>
104 #include <stdint.h>
105 #include <stdbool.h>
106 #include <malloc.h>
107 #include <util.h>
108 #include <trace.h>
109 
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/thread.h>
114 #include <kernel/spinlock.h>
115 
116 static uint32_t malloc_lock(void)
117 {
118 	uint32_t exceptions;
119 
120 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ | THREAD_EXCP_FIQ);
121 	cpu_spin_lock(&__malloc_spinlock);
122 	return exceptions;
123 }
124 
/*
 * Release the heap lock taken with malloc_lock() and restore the
 * exception mask it returned. The spinlock is released before the
 * exceptions are unmasked (reverse order of malloc_lock()).
 */
static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock(&__malloc_spinlock);
	thread_unmask_exceptions(exceptions);
}
130 
/* Tag [buf, buf + len) as freed heap for the kernel address sanitizer */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}
135 
/* Tag [buf, buf + len) as accessible for the kernel address sanitizer */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}
140 
141 #else /*__KERNEL__*/
142 /* Compiling for TA */
/* TA context: no locking needed, return a dummy exception mask */
static uint32_t malloc_lock(void)
{
	return 0;
}
147 
/* TA context: nothing was locked, so nothing to release */
static void malloc_unlock(uint32_t exceptions __unused)
{
}
151 
/* ASAN tagging is only performed in the kernel: no-op for TAs */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}
155 
/* ASAN tagging is only performed in the kernel: no-op for TAs */
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}
159 #endif /*__KERNEL__*/
160 
161 #include "bget.c"		/* this is ugly, but this is bget */
162 
/* Descriptor for one contiguous memory region handed to bpool() */
struct malloc_pool {
	void *buf;	/* Start of the pool (SizeQuant aligned) */
	size_t len;	/* Size of the pool in bytes */
};

/* Dynamic array of all registered pools, grown by malloc_add_pool() */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;	/* Number of entries in malloc_pool */
170 
171 #ifdef BufStats
172 
173 static struct malloc_stats mstats;
174 
175 static void raw_malloc_return_hook(void *p, size_t requested_size)
176 {
177 	if (totalloc > mstats.max_allocated)
178 		mstats.max_allocated = totalloc;
179 
180 	if (!p) {
181 		mstats.num_alloc_fail++;
182 		if (requested_size > mstats.biggest_alloc_fail) {
183 			mstats.biggest_alloc_fail = requested_size;
184 			mstats.biggest_alloc_fail_used = totalloc;
185 		}
186 	}
187 }
188 
189 void malloc_reset_stats(void)
190 {
191 	unsigned int exceptions = malloc_lock();
192 
193 	mstats.max_allocated = 0;
194 	mstats.num_alloc_fail = 0;
195 	mstats.biggest_alloc_fail = 0;
196 	mstats.biggest_alloc_fail_used = 0;
197 	malloc_unlock(exceptions);
198 }
199 
200 void malloc_get_stats(struct malloc_stats *stats)
201 {
202 	uint32_t exceptions = malloc_lock();
203 
204 	memcpy(stats, &mstats, sizeof(*stats));
205 	stats->allocated = totalloc;
206 	malloc_unlock(exceptions);
207 }
208 
209 #else /* BufStats */
210 
/* Statistics disabled (no BufStats): allocation hook does nothing */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
{
}
214 
215 #endif /* BufStats */
216 
217 #ifdef BufValid
218 static void raw_malloc_validate_pools(void)
219 {
220 	size_t n;
221 
222 	for (n = 0; n < malloc_pool_len; n++)
223 		bpoolv(malloc_pool[n].buf);
224 }
225 #else
/* Pool validation disabled (no BufValid): nothing to check */
static void raw_malloc_validate_pools(void)
{
}
229 #endif
230 
/* Cursor used to walk every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* Next buffer header to visit */
	size_t pool_idx;	/* Index into malloc_pool[] */
};
235 
/* Start iteration at the first buffer of the first registered pool */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
241 
/*
 * Visit one buffer in the current pool and advance the iterator.
 * Outputs the payload start (*buf), payload length (*len) and whether
 * the buffer is on the free list (*isfree). Returns false when the
 * pool's end sentinel (ESent) is reached, true otherwise.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	/* A bsize of ESent marks the end of the pool */
	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer: bget stores its size negated */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the plain buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	/* Advance to the physically adjacent buffer in the pool */
	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
271 
/*
 * Advance to the next allocated buffer, walking across all pools and
 * skipping free buffers. Returns true with *buf pointing at the
 * payload, or false once every pool is exhausted.
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;	/* Skip free buffers */
			return true;
		}

		/* Current pool exhausted, move to the next one if any */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}
291 
/*
 * Convenience macro for looping over all allocated buffers: each
 * iteration sets *(bp) to the start of the next allocated payload.
 */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
296 
297 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
298 {
299 	void *ptr = NULL;
300 	size_t s = hdr_size + ftr_size + pl_size;
301 
302 	/*
303 	 * Make sure that malloc has correct alignment of returned buffers.
304 	 * The assumption is that uintptr_t will be as wide as the largest
305 	 * required alignment of any type.
306 	 */
307 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
308 
309 	raw_malloc_validate_pools();
310 
311 	/* Check wrapping */
312 	if (s < pl_size)
313 		goto out;
314 
315 	/* BGET doesn't like 0 sized allocations */
316 	if (!s)
317 		s++;
318 
319 	ptr = bget(s);
320 out:
321 	raw_malloc_return_hook(ptr, pl_size);
322 
323 	return ptr;
324 }
325 
/* Return a buffer obtained from raw_malloc()/raw_calloc() to the pools */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
333 
334 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
335 		size_t pl_size)
336 {
337 	size_t s = hdr_size + ftr_size + pl_nmemb * pl_size;
338 	void *ptr = NULL;
339 
340 	raw_malloc_validate_pools();
341 
342 	/* Check wrapping */
343 	if (s < pl_nmemb || s < pl_size)
344 		goto out;
345 
346 	/* BGET doesn't like 0 sized allocations */
347 	if (!s)
348 		s++;
349 
350 	ptr = bgetz(s);
351 out:
352 	raw_malloc_return_hook(ptr, pl_nmemb * pl_size);
353 
354 	return ptr;
355 }
356 
357 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
358 		size_t pl_size)
359 {
360 	size_t s = hdr_size + ftr_size + pl_size;
361 	void *p = NULL;
362 
363 	/* Check wrapping */
364 	if (s < pl_size)
365 		goto out;
366 
367 	raw_malloc_validate_pools();
368 
369 	/* BGET doesn't like 0 sized allocations */
370 	if (!s)
371 		s++;
372 
373 	p = bgetr(ptr, s);
374 out:
375 	raw_malloc_return_hook(p, pl_size);
376 
377 	return p;
378 }
379 
/*
 * Turn the 'size' bytes at bf (which end immediately before the
 * allocated block bn) into a free buffer and insert it on the free
 * list. bn must be allocated and must not already have a free block
 * in front of it.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer at the tail of the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
402 
/*
 * Release [orig_buf, new_buf) at the front of an allocated buffer so
 * that the allocation effectively starts at new_buf instead. Used by
 * raw_memalign() to drop the unaligned head of an over-allocation.
 * The released space is merged with a preceding free buffer when one
 * exists, otherwise it becomes a new free block.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
452 
/*
 * Shrink the allocated buffer at buf down to 'size' payload bytes,
 * releasing the surplus at its end. The freed tail is merged with the
 * following free buffer when one exists, otherwise it becomes a new
 * free block. Used by raw_memalign() after the start of the buffer
 * has been aligned.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
525 
/*
 * Allocate 'size' payload bytes such that (buffer + hdr_size) is
 * aligned on 'alignment' (must be a power of two). Over-allocates
 * from bget, then releases the unaligned front with brel_before() and
 * the unused tail with brel_after(). Returns NULL on bad alignment,
 * size wrap-around or out of memory.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		/* Keep room for the free-block bookkeeping in front */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
out:
	raw_malloc_return_hook((void *)b, size);

	return (void *)b;
}
592 
/*
 * Return the usable payload capacity of an allocated bget buffer.
 * Most of the stuff in this function is copied from bgetr() in bget.c.
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
614 
615 #ifdef ENABLE_MDBG
616 
/*
 * Debug header placed in front of each allocation when ENABLE_MDBG is
 * set. Records the allocation site (reported by mdbg_check()) and
 * carries guard magics checked by assert_header().
 */
struct mdbg_hdr {
	const char *fname;	/* File that made the allocation */
	uint16_t line;		/* Line number of the allocation */
	uint32_t pl_size;	/* Requested payload size in bytes */
	uint32_t magic;		/* MDBG_HEADER_MAGIC; cleared on free */
#if defined(ARM64)
	uint64_t pad;		/* Pads header size for 16-byte alignment */
#endif
};

/* Guard values written by mdbg_update_hdr(), checked by assert_header() */
#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
629 
630 static size_t mdbg_get_ftr_size(size_t pl_size)
631 {
632 	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;
633 
634 	return ftr_pad + sizeof(uint32_t);
635 }
636 
637 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
638 {
639 	uint32_t *footer;
640 
641 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
642 			      mdbg_get_ftr_size(hdr->pl_size));
643 	footer--;
644 	return footer;
645 }
646 
/*
 * Fill in the debug header (allocation site and payload size) and
 * write the footer guard. pl_size must be stored before the footer is
 * located since mdbg_get_footer() reads hdr->pl_size.
 */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
660 
/* Debug variant of malloc(): records fname:lineno and guard magics */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* Return the payload right after the header */
	}

	malloc_unlock(exceptions);
	return hdr;
}
685 
/* Assert that neither the header nor footer guard has been corrupted */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
691 
692 static void mdbg_free(void *ptr)
693 {
694 	struct mdbg_hdr *hdr = ptr;
695 
696 	if (hdr) {
697 		hdr--;
698 		assert_header(hdr);
699 		hdr->magic = 0;
700 		*mdbg_get_footer(hdr) = 0;
701 		raw_free(hdr);
702 	}
703 }
704 
/* Standard free() entry point, guarded by the heap lock */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	mdbg_free(ptr);
	malloc_unlock(exceptions);
}
712 
713 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
714 {
715 	struct mdbg_hdr *hdr;
716 	uint32_t exceptions = malloc_lock();
717 
718 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
719 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
720 	if (hdr) {
721 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
722 		hdr++;
723 	}
724 	malloc_unlock(exceptions);
725 	return hdr;
726 }
727 
/*
 * Realloc implementation used with the heap lock already held.
 * Verifies the old guards (if any), resizes including header/footer
 * overhead and rewrites the guards for the new size.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		/* Step back to the debug header and check the guards */
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* Return the payload right after the header */
	}
	return hdr;
}
745 
/* Debug variant of realloc(), guarded by the heap lock */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock(exceptions);
	return p;
}
755 
/* Lock-free resize used by malloc_add_pool(), with mdbg bookkeeping */
#define realloc_unlocked(ptr, size) \
		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
758 
/* Debug variant of memalign(): records fname:lineno and guard magics */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* Return the payload right after the header */
	}
	malloc_unlock(exceptions);
	return hdr;
}
774 
775 
/*
 * Map a raw buffer start (the debug header) to its payload start,
 * storing the recorded payload size in *size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
784 
/*
 * Walk all allocated buffers and assert their guard magics are
 * intact. If bufdump > 0, also log each allocation together with its
 * recorded origin (file:line).
 */
void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(exceptions);
}
811 
812 #else
813 
814 void *malloc(size_t size)
815 {
816 	void *p;
817 	uint32_t exceptions = malloc_lock();
818 
819 	p = raw_malloc(0, 0, size);
820 	malloc_unlock(exceptions);
821 	return p;
822 }
823 
/* Standard free() entry point, guarded by the heap lock */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	raw_free(ptr);
	malloc_unlock(exceptions);
}
831 
832 void *calloc(size_t nmemb, size_t size)
833 {
834 	void *p;
835 	uint32_t exceptions = malloc_lock();
836 
837 	p = raw_calloc(0, 0, nmemb, size);
838 	malloc_unlock(exceptions);
839 	return p;
840 }
841 
/* Resize with the heap lock already held (used by malloc_add_pool()) */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}
846 
847 void *realloc(void *ptr, size_t size)
848 {
849 	void *p;
850 	uint32_t exceptions = malloc_lock();
851 
852 	p = realloc_unlocked(ptr, size);
853 	malloc_unlock(exceptions);
854 	return p;
855 }
856 
857 void *memalign(size_t alignment, size_t size)
858 {
859 	void *p;
860 	uint32_t exceptions = malloc_lock();
861 
862 	p = raw_memalign(0, 0, alignment, size);
863 	malloc_unlock(exceptions);
864 	return p;
865 }
866 
/* Without mdbg headers the payload is the whole bget buffer */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
872 
873 #endif
874 
/*
 * Register the memory region [buf, buf + len) as an additional heap
 * pool. The region is aligned to SizeQuant, handed to bpool() and
 * recorded in the malloc_pool array. Regions too small to hold the
 * pool bookkeeping are silently skipped.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest usable pool: aligned pool record + two bget headers */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();
	/* Tag the fresh pool as freed heap for ASAN before bget uses it */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}
911 
/*
 * Return true if [buf, buf + len) lies entirely within one single
 * allocated heap buffer, false otherwise (including when the range
 * wraps around).
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		/* Entirely contained in this allocated buffer? */
		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(exceptions);

	return ret;
}
946 
947 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
948 {
949 	uintptr_t buf_start = (uintptr_t) buf;
950 	uintptr_t buf_end = buf_start + len;
951 	size_t n;
952 	bool ret = false;
953 	uint32_t exceptions = malloc_lock();
954 
955 	raw_malloc_validate_pools();
956 
957 	for (n = 0; n < malloc_pool_len; n++) {
958 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
959 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
960 
961 		if (buf_start > buf_end || pool_start > pool_end) {
962 			ret = true;	/* Wrapping buffers, shouldn't happen */
963 			goto out;
964 		}
965 
966 		if (buf_end > pool_start || buf_start < pool_end) {
967 			ret = true;
968 			goto out;
969 		}
970 	}
971 
972 out:
973 	malloc_unlock(exceptions);
974 	return ret;
975 }
976