xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision ef4bc451c262f007562867ea4e5f4ca9f26459fd)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
102 #include <compiler.h>
103 #include <stdlib.h>
104 #include <stdint.h>
105 #include <stdbool.h>
106 #include <malloc.h>
107 #include <util.h>
108 #include <trace.h>
109 
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/mutex.h>
114 
/* Serialize heap access in TEE core via the global malloc mutex. */
static void malloc_lock(void)
{
	mutex_lock(&__malloc_mu);
}
119 
/* Release the global malloc mutex taken by malloc_lock(). */
static void malloc_unlock(void)
{
	mutex_unlock(&__malloc_mu);
}
124 
/* Mark [buf, buf + len) as freed heap memory for ASan tracking. */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}
129 
/* Mark [buf, buf + len) as accessible (allocated) for ASan tracking. */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}
134 
135 #else /*__KERNEL__*/
136 /* Compiling for TA */
/* No-op in the TA environment (see "Compiling for TA" above). */
static void malloc_lock(void)
{
}
140 
/* No-op counterpart of malloc_lock() for TAs. */
static void malloc_unlock(void)
{
}
144 
/* ASan tagging is only done in TEE core; no-op for TAs. */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}
148 
/* ASan tagging is only done in TEE core; no-op for TAs. */
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}
152 #endif /*__KERNEL__*/
153 
154 #include "bget.c"		/* this is ugly, but this is bget */
155 
/* Descriptor for one memory region handed to bget via malloc_add_pool() */
struct malloc_pool {
	void *buf;	/* start of the pool */
	size_t len;	/* size of the pool in bytes */
};

/* Dynamically grown array of all registered pools and its entry count */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;
163 
164 #ifdef BufStats
165 
166 static struct malloc_stats mstats;
167 
168 static void raw_malloc_return_hook(void *p, size_t requested_size)
169 {
170 	if (totalloc > mstats.max_allocated)
171 		mstats.max_allocated = totalloc;
172 
173 	if (!p) {
174 		mstats.num_alloc_fail++;
175 		if (requested_size > mstats.biggest_alloc_fail) {
176 			mstats.biggest_alloc_fail = requested_size;
177 			mstats.biggest_alloc_fail_used = totalloc;
178 		}
179 	}
180 }
181 
/*
 * Reset the running heap statistics. Note that mstats.size (total pool
 * size, maintained by malloc_add_pool()) is deliberately left untouched.
 */
void malloc_reset_stats(void)
{
	malloc_lock();
	mstats.max_allocated = 0;
	mstats.num_alloc_fail = 0;
	mstats.biggest_alloc_fail = 0;
	mstats.biggest_alloc_fail_used = 0;
	malloc_unlock();
}
191 
192 void malloc_get_stats(struct malloc_stats *stats)
193 {
194 	malloc_lock();
195 	memcpy(stats, &mstats, sizeof(*stats));
196 	stats->allocated = totalloc;
197 	malloc_unlock();
198 }
199 
200 #else /* BufStats */
201 
/* Statistics disabled (no BufStats): allocation results are not recorded. */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused)
{
}
205 
206 #endif /* BufStats */
207 
208 #ifdef BufValid
209 static void raw_malloc_validate_pools(void)
210 {
211 	size_t n;
212 
213 	for (n = 0; n < malloc_pool_len; n++)
214 		bpoolv(malloc_pool[n].buf);
215 }
216 #else
/* Pool validation disabled (no BufValid): nothing to check. */
static void raw_malloc_validate_pools(void)
{
}
220 #endif
221 
/* Cursor for walking every buffer (free and allocated) in every pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};
226 
/*
 * Start iteration at the first buffer of the first registered pool.
 * Assumes at least one pool has been added via malloc_add_pool().
 */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
232 
/*
 * Step to the next buffer within the current pool. On success returns
 * true and reports the buffer's payload start (*buf), payload length
 * (*len) and whether it is free (*isfree), then advances the iterator.
 * Returns false when the pool's end sentinel (ESent) is reached.
 *
 * In bget, a negative bsize marks an allocated buffer and a positive
 * one a free buffer; the magnitude is the total buffer size.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the allocated-buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
262 
/*
 * Advance the iterator to the next *allocated* buffer across all pools,
 * skipping free buffers and moving on to the next pool when the current
 * one is exhausted. Returns false when all pools have been visited.
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		/* Current pool done; stop if it was the last one */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}
282 
283 /* Convenience macro for looping over all allocated buffers */
284 #define BPOOL_FOREACH(iterator, bp) \
285 		for (bpool_foreach_iterator_init((iterator)); \
286 			bpool_foreach((iterator), (bp));)
287 
288 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
289 {
290 	void *ptr = NULL;
291 	size_t s = hdr_size + ftr_size + pl_size;
292 
293 	/*
294 	 * Make sure that malloc has correct alignment of returned buffers.
295 	 * The assumption is that uintptr_t will be as wide as the largest
296 	 * required alignment of any type.
297 	 */
298 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
299 
300 	raw_malloc_validate_pools();
301 
302 	/* Check wrapping */
303 	if (s < pl_size)
304 		goto out;
305 
306 	/* BGET doesn't like 0 sized allocations */
307 	if (!s)
308 		s++;
309 
310 	ptr = bget(s);
311 out:
312 	raw_malloc_return_hook(ptr, pl_size);
313 
314 	return ptr;
315 }
316 
/* Return ptr to bget; a NULL ptr is silently ignored (like free()). */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
324 
/*
 * Zero-initialized allocation of hdr_size + ftr_size + pl_nmemb * pl_size
 * bytes. Returns NULL on failure or on any size-computation overflow.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	void *ptr = NULL;
	size_t pl = 0;
	size_t s = 0;

	raw_malloc_validate_pools();

	/*
	 * Check that pl_nmemb * pl_size doesn't overflow; the previous
	 * check (s < pl_nmemb || s < pl_size) let wrapped products
	 * through, e.g. nmemb = 0x10001, size = 0x10000 on 32-bit.
	 */
	if (pl_size && pl_nmemb > SIZE_MAX / pl_size)
		goto out;
	pl = pl_nmemb * pl_size;

	/* Check that the header/footer/payload sum doesn't wrap either */
	s = hdr_size + ftr_size;
	if (s < hdr_size)
		goto out;
	s += pl;
	if (s < pl)
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size);

	return ptr;
}
347 
/*
 * Resize ptr to hold hdr_size + ftr_size + pl_size bytes via bgetr().
 * Returns NULL on failure or size-computation overflow (ptr unchanged).
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	size_t s = hdr_size + ftr_size;
	void *p = NULL;

	/* Validate first, as in raw_malloc()/raw_calloc() */
	raw_malloc_validate_pools();

	/*
	 * Check wrapping of both additions; the old check (s < pl_size)
	 * missed overflow in hdr_size + ftr_size alone.
	 */
	if (s < hdr_size)
		goto out;
	s += pl_size;
	if (s < pl_size)
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s);
out:
	raw_malloc_return_hook(p, pl_size);

	return p;
}
370 
/*
 * Turn the `size` bytes starting at bf into a free buffer that ends
 * right at the allocated block bn, and link it onto bget's free list.
 * Preconditions are asserted: bn must immediately follow, be allocated
 * and not already be preceded by a free block.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
393 
/*
 * Release the front part [orig_buf, new_buf) of an allocated bget buffer
 * so that the allocation effectively starts at new_buf instead. Used by
 * raw_memalign() to discard the unaligned head of an over-allocation.
 * The freed head is either merged into a preceding free block or turned
 * into a new free block of its own.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
443 
/*
 * Shrink an allocated bget buffer to `size` payload bytes, releasing the
 * surplus at its end. Used by raw_memalign() after the start has been
 * aligned. The tail is merged with a following free buffer when there is
 * one, otherwise it becomes a new free block — but only if the surplus
 * is large enough to hold a free-buffer header.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
516 
/*
 * Allocate `size` payload bytes such that the payload (i.e. the address
 * hdr_size bytes into the returned buffer) is aligned to `alignment`,
 * which must be a power of two. Implemented by over-allocating, then
 * releasing the unaligned head (brel_before) and unused tail
 * (brel_after). Returns NULL on bad alignment, overflow or exhaustion.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		/* Keep room in front for the free-buffer header */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);
out:
	raw_malloc_return_hook((void *)b, size);

	return (void *)b;
}
583 
584 /* Most of the stuff in this function is copied from bgetr() in bget.c */
585 static __maybe_unused bufsize bget_buf_size(void *buf)
586 {
587 	bufsize osize;          /* Old size of buffer */
588 	struct bhead *b;
589 
590 	b = BH(((char *)buf) - sizeof(struct bhead));
591 	osize = -b->bsize;
592 #ifdef BECtl
593 	if (osize == 0) {
594 		/*  Buffer acquired directly through acqfcn. */
595 		struct bdhead *bd;
596 
597 		bd = BDH(((char *)buf) - sizeof(struct bdhead));
598 		osize = bd->tsize - sizeof(struct bdhead);
599 	} else
600 #endif
601 		osize -= sizeof(struct bhead);
602 	assert(osize > 0);
603 	return osize;
604 }
605 
606 #ifdef ENABLE_MDBG
607 
/*
 * Bookkeeping header placed immediately in front of every allocation
 * when memory debugging (ENABLE_MDBG) is on; records the allocation
 * site. A 32-bit footer magic is placed after the payload to catch
 * overruns (see mdbg_get_footer()).
 */
struct mdbg_hdr {
	const char *fname;	/* file that made the allocation */
	uint16_t line;		/* line number of the allocation */
	uint32_t pl_size;	/* payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while live, 0 when freed */
#if defined(ARM64)
	uint64_t pad;		/* keep sizeof a multiple of 16 (see assert in mdbg_malloc) */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
620 
/*
 * Size of the footer area for a payload of pl_size bytes: padding up to
 * the next uint32_t boundary plus the 32-bit footer magic itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	return ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size + sizeof(uint32_t);
}
627 
628 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
629 {
630 	uint32_t *footer;
631 
632 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
633 			      mdbg_get_ftr_size(hdr->pl_size));
634 	footer--;
635 	return footer;
636 }
637 
638 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
639 		int lineno, size_t pl_size)
640 {
641 	uint32_t *footer;
642 
643 	hdr->fname = fname;
644 	hdr->line = lineno;
645 	hdr->pl_size = pl_size;
646 	hdr->magic = MDBG_HEADER_MAGIC;
647 
648 	footer = mdbg_get_footer(hdr);
649 	*footer = MDBG_FOOTER_MAGIC;
650 }
651 
/*
 * malloc() with allocation-site tracking: prepends a struct mdbg_hdr
 * and appends a footer magic around the payload. Returns a pointer to
 * the payload (just past the header), or NULL on failure.
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* advance past the header to the payload */
	}

	malloc_unlock();
	return hdr;
}
677 
/* Assert the header and footer magics are intact (catches corruption). */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
683 
684 static void mdbg_free(void *ptr)
685 {
686 	struct mdbg_hdr *hdr = ptr;
687 
688 	if (hdr) {
689 		hdr--;
690 		assert_header(hdr);
691 		hdr->magic = 0;
692 		*mdbg_get_footer(hdr) = 0;
693 		raw_free(hdr);
694 	}
695 }
696 
/* Standard free(): thread-safe wrapper around mdbg_free(). */
void free(void *ptr)
{
	malloc_lock();
	mdbg_free(ptr);
	malloc_unlock();
}
703 
704 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
705 {
706 	struct mdbg_hdr *hdr;
707 
708 	malloc_lock();
709 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
710 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
711 	if (hdr) {
712 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
713 		hdr++;
714 	}
715 	malloc_unlock();
716 	return hdr;
717 }
718 
/*
 * realloc() body without locking; the caller must hold the malloc lock.
 * Validates the existing header (if any) before resizing, then rewrites
 * header and footer for the new size. Returns the new payload pointer,
 * or NULL on failure.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		/* Step back to our header and check it is uncorrupted */
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* advance past the header to the payload */
	}
	return hdr;
}
736 
/* Thread-safe realloc() with allocation-site tracking. */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock();

	return ret;
}
746 
747 #define realloc_unlocked(ptr, size) \
748 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
749 
/*
 * memalign() with allocation-site tracking: the payload (just past the
 * debug header) is aligned to `alignment`. Returns NULL on failure.
 */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();
	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* advance past the header to the payload */
	}
	malloc_unlock();
	return hdr;
}
765 
766 
/*
 * Given a raw bget buffer that holds an mdbg allocation, return the
 * payload start and store the payload size in *size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The underlying buffer must be large enough for the payload */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
775 
776 void mdbg_check(int bufdump)
777 {
778 	struct bpool_iterator itr;
779 	void *b;
780 
781 	malloc_lock();
782 	raw_malloc_validate_pools();
783 
784 	BPOOL_FOREACH(&itr, &b) {
785 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
786 
787 		assert_header(hdr);
788 
789 		if (bufdump > 0) {
790 			const char *fname = hdr->fname;
791 
792 			if (!fname)
793 				fname = "unknown";
794 
795 			IMSG("buffer: %d bytes %s:%d\n",
796 				hdr->pl_size, fname, hdr->line);
797 		}
798 	}
799 
800 	malloc_unlock();
801 }
802 
803 #else
804 
/* Standard malloc(): thread-safe wrapper around raw_malloc(). */
void *malloc(size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_malloc(0, 0, size);
	malloc_unlock();

	return ret;
}
814 
/* Standard free(): thread-safe wrapper around raw_free(). */
void free(void *ptr)
{
	malloc_lock();
	raw_free(ptr);
	malloc_unlock();
}
821 
/* Standard calloc(): thread-safe, zero-initialized allocation. */
void *calloc(size_t nmemb, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_calloc(0, 0, nmemb, size);
	malloc_unlock();

	return ret;
}
831 
/* realloc() without taking the malloc lock; caller must hold it. */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}
836 
/* Standard realloc(): thread-safe wrapper around realloc_unlocked(). */
void *realloc(void *ptr, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = realloc_unlocked(ptr, size);
	malloc_unlock();

	return ret;
}
846 
/* Standard memalign(): thread-safe wrapper around raw_memalign(). */
void *memalign(size_t alignment, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_memalign(0, 0, alignment, size);
	malloc_unlock();

	return ret;
}
856 
/*
 * Without mdbg headers the bget buffer is the payload: report its size
 * and return it unchanged.
 */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
862 
863 #endif
864 
/*
 * Register [buf, buf + len) as a heap pool: align the region to
 * SizeQuant, hand it to bget via bpool() and record it in malloc_pool[]
 * so the validation/iteration helpers can find it. Regions too small to
 * hold the bookkeeping are skipped with a debug message.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest region worth registering: aligned pool record + headers */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	malloc_lock();
	/* Tell ASan the region is free heap before bget takes it over */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock();
}
900 
/*
 * Return true if [buf, buf + len) lies entirely inside a single
 * currently-allocated heap buffer. Used to validate that a caller-
 * supplied range really points into live heap memory.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;

	malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		/* Range fully contained in this allocated buffer? */
		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock();

	return ret;
}
936 
937 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
938 {
939 	uintptr_t buf_start = (uintptr_t) buf;
940 	uintptr_t buf_end = buf_start + len;
941 	size_t n;
942 	bool ret = false;
943 
944 	malloc_lock();
945 
946 	raw_malloc_validate_pools();
947 
948 	for (n = 0; n < malloc_pool_len; n++) {
949 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
950 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
951 
952 		if (buf_start > buf_end || pool_start > pool_end) {
953 			ret = true;	/* Wrapping buffers, shouldn't happen */
954 			goto out;
955 		}
956 
957 		if (buf_end > pool_start || buf_start < pool_end) {
958 			ret = true;
959 			goto out;
960 		}
961 	}
962 
963 out:
964 	malloc_unlock();
965 	return ret;
966 }
967