xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 9403c583381528e7fb391e3769644cc9653cfbb6)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #define SizeQuant   8		/* Buffer allocation size quantum:
44 				   all buffers allocated are a
45 				   multiple of this size.  This
46 				   MUST be a power of two. */
47 
48 #ifdef BGET_ENABLE_OPTION
49 #define BufDump     1		/* Define this symbol to enable the
50 				   bpoold() function which dumps the
51 				   buffers in a buffer pool. */
52 
53 #define BufValid    1		/* Define this symbol to enable the
54 				   bpoolv() function for validating
55 				   a buffer pool. */
56 
57 #define DumpData    1		/* Define this symbol to enable the
58 				   bufdump() function which allows
59 				   dumping the contents of an allocated
60 				   or free buffer. */
61 
62 #define BufStats    1		/* Define this symbol to enable the
63 				   bstats() function which calculates
64 				   the total free space in the buffer
65 				   pool, the largest available
66 				   buffer, and the total space
67 				   currently allocated. */
68 
69 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
70 				   pattern of garbage to trip up
71 				   miscreants who attempt to use
72 				   pointers into released buffers. */
73 
74 #define BestFit     1		/* Use a best fit algorithm when
75 				   searching for space for an
76 				   allocation request.  This uses
77 				   memory more efficiently, but
78 				   allocation will be much slower. */
79 
80 #define BECtl       1		/* Define this symbol to enable the
81 				   bectl() function for automatic
82 				   pool space control.  */
83 #endif
84 
85 #ifdef MEM_DEBUG
86 #undef NDEBUG
87 #define DumpData    1
88 #define BufValid    1
89 #define FreeWipe    1
90 #endif
91 
92 #if defined(CFG_TEE_CORE_DEBUG) && CFG_TEE_CORE_DEBUG != 0
93 #define BufStats    1
94 #endif
95 
96 #include <stdlib.h>
97 #include <stdint.h>
98 #include <stdbool.h>
99 #include <malloc.h>
100 #include "bget.c"		/* this is ugly, but this is bget */
101 #include <util.h>
102 
#ifdef __KERNEL__
/* Compiling for TEE Core */
#include <kernel/mutex.h>

static struct mutex malloc_mu = MUTEX_INITIALIZER;

/* Serialize all heap operations; every public entry point below takes this */
static void malloc_lock(void)
{
	mutex_lock(&malloc_mu);
}

static void malloc_unlock(void)
{
	mutex_unlock(&malloc_mu);
}

#else /*__KERNEL__*/
/* Compiling for TA */
/*
 * No-op locks: presumably a TA heap is not accessed concurrently —
 * TODO confirm against the TA threading model.
 */
static void malloc_lock(void)
{
}

static void malloc_unlock(void)
{
}
#endif /*__KERNEL__*/
129 
130 #if defined(ENABLE_MDBG)
131 #include <trace.h>
132 #endif
133 
/* One entry per memory range registered with malloc_add_pool() */
struct malloc_pool {
	void *buf;	/* start of the pool (SizeQuant aligned) */
	size_t len;	/* pool length in bytes */
};

/* Dynamic array of registered pools, grown in malloc_add_pool() */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;
141 
#ifdef BufStats
/* High watermark of bget's totalloc since the last reset */
static size_t max_alloc_heap;

/* Record a new high watermark if totalloc just exceeded the old one */
static void raw_malloc_save_max_alloced_size(void)
{
	if (totalloc > max_alloc_heap)
		max_alloc_heap = totalloc;
}

/* Restart the high watermark measurement from zero */
void malloc_reset_max_allocated(void)
{
	malloc_lock();
	max_alloc_heap = 0;
	malloc_unlock();
}

/* Return the highest number of allocated bytes seen since last reset */
size_t malloc_get_max_allocated(void)
{
	size_t r;

	malloc_lock();
	r = max_alloc_heap;
	malloc_unlock();
	return r;
}

/* Return the number of bytes currently allocated (bget's totalloc) */
size_t malloc_get_allocated(void)
{
	size_t r;

	malloc_lock();
	r = totalloc;
	malloc_unlock();
	return r;
}

#else /* BufStats */

/* Statistics compiled out: keep the API with stub implementations */
static void raw_malloc_save_max_alloced_size(void)
{
}

void malloc_reset_max_allocated(void)
{
}

size_t malloc_get_max_allocated(void)
{
	return 0;
}

size_t malloc_get_allocated(void)
{
	return 0;
}
#endif /* BufStats */
198 
199 size_t malloc_get_heap_size(void)
200 {
201 	size_t n;
202 	size_t s = 0;
203 
204 	malloc_lock();
205 
206 	for (n = 0; n < malloc_pool_len; n++)
207 		s += malloc_pool[n].len;
208 
209 	malloc_unlock();
210 
211 	return s;
212 }
213 
#ifdef BufValid
/* Run bget's consistency check (bpoolv()) over every registered pool */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
/* Pool validation compiled out */
static void raw_malloc_validate_pools(void)
{
}
#endif
227 
/* Cursor for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* current index into malloc_pool[] */
};

/*
 * Point the iterator at the first buffer of the first pool.
 * NOTE(review): indexes malloc_pool[0] unconditionally, so
 * malloc_add_pool() must have been called at least once.
 */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
238 
/*
 * Visit the buffer the iterator currently points at and advance to the
 * next buffer in the same pool. Returns false when the pool's end
 * sentinel is reached. On success *buf/*len describe the payload and
 * *isfree tells whether the buffer is on the free list.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	/* ESent marks the end of a bget pool */
	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer: bget stores its size negated */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the plain buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	/* Buffers are laid out back to back; step over this one */
	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
268 
269 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
270 {
271 	while (true) {
272 		size_t len;
273 		bool isfree;
274 
275 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
276 			if (isfree)
277 				continue;
278 			return true;
279 		}
280 
281 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
282 			return false;
283 
284 		iterator->pool_idx++;
285 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
286 	}
287 }
288 
/*
 * Convenience macro for looping over all allocated (non-free) buffers
 * in all registered pools; (bp) receives each payload pointer in turn.
 */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
293 
294 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
295 {
296 	void *ptr;
297 	size_t s = hdr_size + ftr_size + pl_size;
298 
299 	/*
300 	 * Make sure that malloc has correct alignment of returned buffers.
301 	 * The assumption is that uintptr_t will be as wide as the largest
302 	 * required alignment of any type.
303 	 */
304 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
305 
306 	raw_malloc_validate_pools();
307 
308 	/* Check wrapping */
309 	if (s < pl_size)
310 		return NULL;
311 
312 	/* BGET doesn't like 0 sized allocations */
313 	if (!s)
314 		s++;
315 
316 	ptr = bget(s);
317 	raw_malloc_save_max_alloced_size();
318 
319 	return ptr;
320 }
321 
/* Release a buffer previously returned by one of the raw_*() allocators. */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;	/* freeing NULL is a no-op */

	brel(ptr);
}
329 
/*
 * Allocate and zero hdr_size + ftr_size + pl_nmemb * pl_size bytes.
 * Returns NULL on failure or if any part of the size computation would
 * overflow.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t pl_bytes;
	size_t s;
	void *ptr;

	raw_malloc_validate_pools();

	/*
	 * Check wrapping. The previous test (s < pl_nmemb || s < pl_size)
	 * did not catch pl_nmemb * pl_size overflowing: the wrapped
	 * product can easily be larger than both factors.
	 */
	if (pl_nmemb && pl_size > SIZE_MAX / pl_nmemb)
		return NULL;
	pl_bytes = pl_nmemb * pl_size;
	if (hdr_size > SIZE_MAX - ftr_size)
		return NULL;
	s = hdr_size + ftr_size;
	if (s > SIZE_MAX - pl_bytes)
		return NULL;
	s += pl_bytes;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
	raw_malloc_save_max_alloced_size();

	return ptr;
}
351 
/*
 * Resize a buffer to hdr_size + ftr_size + pl_size bytes via bgetr().
 * Returns NULL on failure or if the size computation wraps.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	size_t total = hdr_size + ftr_size + pl_size;
	void *new_ptr = NULL;

	/* Bail out if the size computation wrapped */
	if (total < pl_size)
		return NULL;

	raw_malloc_validate_pools();

	/* BGET rejects 0 sized allocations, round up to one byte */
	if (!total)
		total = 1;

	new_ptr = bgetr(ptr, total);
	raw_malloc_save_max_alloced_size();

	return new_ptr;
}
373 
/*
 * Turn the range [bf, bf + size) into a free block and insert it on the
 * free list. bn is the allocated block immediately after the new free
 * block; its prevfree field is updated to point back at it.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
396 
/*
 * Release the leading part of an allocated buffer: everything in
 * [orig_buf, new_buf) is given back (merged with a preceding free block
 * or turned into a new free block) and new_buf becomes the start of the
 * remaining allocated buffer. Used by raw_memalign() to align buffers.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
446 
/*
 * Shrink an allocated buffer so it holds just `size` payload bytes and
 * give the tail back, either by consolidating with a following free
 * block or by creating a new free block after the buffer. Does nothing
 * if the tail is too small to hold a free block header.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
519 
/*
 * Allocate `size` payload bytes such that (buffer + hdr_size) is
 * aligned to `alignment` (must be a power of two). Implemented by
 * over-allocating with bget() and then releasing the unused head
 * (brel_before()) and tail (brel_after()) of the buffer.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		return NULL;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);

	raw_malloc_save_max_alloced_size();

	return (void *)b;
}
586 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/* Return the usable payload size of an allocated bget buffer. */
static bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
608 
609 #ifdef ENABLE_MDBG
610 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG is
 * set. It records the allocation site and is bracketed by magic values
 * (header magic here, footer magic after the payload) so overruns and
 * use-after-free can be caught by assert_header().
 */
struct mdbg_hdr {
	const char *fname;	/* file name of the allocation site */
	uint16_t line;		/* line number of the allocation site */
	uint32_t pl_size;	/* payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated, 0 after free */
#if defined(ARM64)
	uint64_t pad;		/* keeps sizeof(struct mdbg_hdr) a multiple of 16,
				   see COMPILE_TIME_ASSERT in mdbg_malloc() */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
623 
/*
 * Size of the footer area for a payload of pl_size bytes: padding up to
 * the next uint32_t boundary plus one uint32_t for the footer magic.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	return ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size + sizeof(uint32_t);
}
630 
631 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
632 {
633 	uint32_t *footer;
634 
635 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
636 			      mdbg_get_ftr_size(hdr->pl_size));
637 	footer--;
638 	return footer;
639 }
640 
641 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
642 		int lineno, size_t pl_size)
643 {
644 	uint32_t *footer;
645 
646 	hdr->fname = fname;
647 	hdr->line = lineno;
648 	hdr->pl_size = pl_size;
649 	hdr->magic = MDBG_HEADER_MAGIC;
650 
651 	footer = mdbg_get_footer(hdr);
652 	*footer = MDBG_FOOTER_MAGIC;
653 }
654 
/*
 * Debug-instrumented malloc(): allocates room for a debug header and
 * footer around the payload, records fname:lineno in the header and
 * returns a pointer to the payload.
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload just after the header */
	}

	malloc_unlock();
	return hdr;
}
680 
/* Assert that neither the header nor the footer magic was clobbered. */
static void assert_header(struct mdbg_hdr *hdr)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
686 
687 static void mdbg_free(void *ptr)
688 {
689 	struct mdbg_hdr *hdr = ptr;
690 
691 	if (hdr) {
692 		hdr--;
693 		assert_header(hdr);
694 		hdr->magic = 0;
695 		*mdbg_get_footer(hdr) = 0;
696 		raw_free(hdr);
697 	}
698 }
699 
/* Standard free(): forwards to mdbg_free() with the heap lock held. */
void free(void *ptr)
{
	malloc_lock();
	mdbg_free(ptr);
	malloc_unlock();
}
706 
707 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
708 {
709 	struct mdbg_hdr *hdr;
710 
711 	malloc_lock();
712 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
713 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
714 	if (hdr) {
715 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
716 		hdr++;
717 	}
718 	malloc_unlock();
719 	return hdr;
720 }
721 
/*
 * Realloc implementation used with the heap lock already held. The old
 * buffer's debug header is validated before the resize and a new header
 * recording fname:lineno is written on success. If raw_realloc() fails
 * the original buffer is left intact and NULL is returned.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;	/* step back to the debug header */
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload just after the header */
	}
	return hdr;
}
739 
/* Locked wrapper around mdbg_realloc_unlocked(). */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock();

	return ret;
}
749 
/* Lock-free realloc used internally; records this file/line as the caller */
#define realloc_unlocked(ptr, size) \
		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
752 
/*
 * Debug-instrumented memalign(): the returned payload is aligned to
 * `alignment` and preceded by a debug header recording fname:lineno.
 */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();
	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload just after the header */
	}
	malloc_unlock();
	return hdr;
}
768 
769 
/*
 * Given a raw buffer (pointing at the debug header), return the start
 * of the user payload and store its size in *size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The underlying bget buffer must be big enough for the payload */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
778 
/*
 * Walk every allocated buffer in every pool and verify the debug
 * header/footer magics. With bufdump > 0 also log each buffer's size
 * and allocation site.
 */
void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;

	malloc_lock();
	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			DMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock();
}
805 
806 #else
807 
/* Standard malloc(): locked wrapper around raw_malloc(). */
void *malloc(size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_malloc(0, 0, size);
	malloc_unlock();

	return ret;
}
817 
/* Standard free(): locked wrapper around raw_free(). */
void free(void *ptr)
{
	malloc_lock();
	raw_free(ptr);
	malloc_unlock();
}
824 
/* Standard calloc(): locked wrapper around raw_calloc(). */
void *calloc(size_t nmemb, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_calloc(0, 0, nmemb, size);
	malloc_unlock();

	return ret;
}
834 
/* Realloc without taking the heap lock; caller must hold it. */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}
839 
/* Standard realloc(): locked wrapper around realloc_unlocked(). */
void *realloc(void *ptr, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = realloc_unlocked(ptr, size);
	malloc_unlock();

	return ret;
}
849 
/* Standard memalign(): locked wrapper around raw_memalign(). */
void *memalign(size_t alignment, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_memalign(0, 0, alignment, size);
	malloc_unlock();

	return ret;
}
859 
/* Without debug headers the payload starts right at ptr. */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
865 
866 #endif
867 
/*
 * Register [buf, buf + len) as heap memory. The range is aligned to
 * SizeQuant, handed to bget via bpool() and recorded in malloc_pool[]
 * for the statistics/validation helpers.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	malloc_lock();
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	/*
	 * NOTE(review): assumed to succeed because a pool was just added;
	 * the assert only guards debug builds.
	 */
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	malloc_pool_len = l;
	malloc_unlock();
}
890 
/*
 * Return true if [buf, buf + len) lies entirely within a single
 * allocated buffer on the heap.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;

	malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		/* Containment requires both ends inside this buffer */
		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock();

	return ret;
}
926 
927 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
928 {
929 	uintptr_t buf_start = (uintptr_t) buf;
930 	uintptr_t buf_end = buf_start + len;
931 	size_t n;
932 	bool ret = false;
933 
934 	malloc_lock();
935 
936 	raw_malloc_validate_pools();
937 
938 	for (n = 0; n < malloc_pool_len; n++) {
939 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
940 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
941 
942 		if (buf_start > buf_end || pool_start > pool_end) {
943 			ret = true;	/* Wrapping buffers, shouldn't happen */
944 			goto out;
945 		}
946 
947 		if (buf_end > pool_start || buf_start < pool_end) {
948 			ret = true;
949 			goto out;
950 		}
951 	}
952 
953 out:
954 	malloc_unlock();
955 	return ret;
956 }
957