xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 0fcbddd4dd17a070eef49e4a45ef5580467e001c)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #define SizeQuant   8		/* Buffer allocation size quantum:
44 				   all buffers allocated are a
45 				   multiple of this size.  This
46 				   MUST be a power of two. */
47 
48 #ifdef BGET_ENABLE_OPTION
49 #define BufDump     1		/* Define this symbol to enable the
50 				   bpoold() function which dumps the
51 				   buffers in a buffer pool. */
52 
53 #define BufValid    1		/* Define this symbol to enable the
54 				   bpoolv() function for validating
55 				   a buffer pool. */
56 
57 #define DumpData    1		/* Define this symbol to enable the
58 				   bufdump() function which allows
59 				   dumping the contents of an allocated
60 				   or free buffer. */
61 
62 #define BufStats    1		/* Define this symbol to enable the
63 				   bstats() function which calculates
64 				   the total free space in the buffer
65 				   pool, the largest available
66 				   buffer, and the total space
67 				   currently allocated. */
68 
69 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
70 				   pattern of garbage to trip up
71 				   miscreants who attempt to use
72 				   pointers into released buffers. */
73 
74 #define BestFit     1		/* Use a best fit algorithm when
75 				   searching for space for an
76 				   allocation request.  This uses
77 				   memory more efficiently, but
78 				   allocation will be much slower. */
79 
80 #define BECtl       1		/* Define this symbol to enable the
81 				   bectl() function for automatic
82 				   pool space control.  */
83 #endif
84 
85 #ifdef MEM_DEBUG
86 #undef NDEBUG
87 #define DumpData    1
88 #define BufValid    1
89 #define FreeWipe    1
90 #endif
91 
92 #if defined(CFG_TEE_CORE_DEBUG) && CFG_TEE_CORE_DEBUG != 0
93 #define BufStats    1
94 #endif
95 
96 #include <stdlib.h>
97 #include <stdint.h>
98 #include <stdbool.h>
99 #include <malloc.h>
100 #include "bget.c"		/* this is ugly, but this is bget */
101 #include <util.h>
102 
#ifdef __KERNEL__
/* Compiling for TEE Core */
#include <kernel/mutex.h>

/* Serializes all access to the bget pools and the bookkeeping below */
static struct mutex malloc_mu = MUTEX_INITIALIZER;

/* Takes the global heap lock (kernel builds only) */
static void malloc_lock(void)
{
	mutex_lock(&malloc_mu);
}

/* Releases the global heap lock (kernel builds only) */
static void malloc_unlock(void)
{
	mutex_unlock(&malloc_mu);
}

#else /*__KERNEL__*/
/* Compiling for TA */
/* TAs are single threaded here, so locking is a no-op */
static void malloc_lock(void)
{
}

static void malloc_unlock(void)
{
}
#endif /*__KERNEL__*/
129 
130 #if defined(ENABLE_MDBG)
131 #include <trace.h>
132 #endif
133 
/* One contiguous memory region handed to bget via bpool() */
struct malloc_pool {
	void *buf;	/* start of the pool (SizeQuant aligned) */
	size_t len;	/* size of the pool in bytes */
};

/* Dynamically grown array of all registered pools, see malloc_add_pool() */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;	/* number of entries in malloc_pool */
141 
#ifdef BufStats
/* High-water mark of totalloc (bytes allocated) since the last reset */
static size_t max_alloc_heap;

/* Updates the high-water mark; called after each successful allocation */
static void raw_malloc_save_max_alloced_size(void)
{
	if (totalloc > max_alloc_heap)
		max_alloc_heap = totalloc;
}

/* Resets the high-water mark reported by malloc_get_max_allocated() */
void malloc_reset_max_allocated(void)
{
	malloc_lock();
	max_alloc_heap = 0;
	malloc_unlock();
}

/* Returns the peak number of bytes allocated since the last reset */
size_t malloc_get_max_allocated(void)
{
	size_t r;

	malloc_lock();
	r = max_alloc_heap;
	malloc_unlock();
	return r;
}

/* Returns the number of bytes currently allocated from the heap */
size_t malloc_get_allocated(void)
{
	size_t r;

	malloc_lock();
	r = totalloc;
	malloc_unlock();
	return r;
}

#else /* BufStats */

/* Statistics disabled: stubs that report nothing */

static void raw_malloc_save_max_alloced_size(void)
{
}

void malloc_reset_max_allocated(void)
{
}

size_t malloc_get_max_allocated(void)
{
	return 0;
}

size_t malloc_get_allocated(void)
{
	return 0;
}
#endif /* BufStats */
198 
199 size_t malloc_get_heap_size(void)
200 {
201 	size_t n;
202 	size_t s = 0;
203 
204 	malloc_lock();
205 
206 	for (n = 0; n < malloc_pool_len; n++)
207 		s += malloc_pool[n].len;
208 
209 	malloc_unlock();
210 
211 	return s;
212 }
213 
#ifdef BufValid
/* Runs bget's consistency check (bpoolv) on every registered pool */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
/* Pool validation disabled: no-op */
static void raw_malloc_validate_pools(void)
{
}
#endif
227 
/* Cursor for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};
232 
233 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
234 {
235 	iterator->pool_idx = 0;
236 	iterator->next_buf = BFH(malloc_pool[0].buf);
237 }
238 
239 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
240 		size_t *len, bool *isfree)
241 {
242 	struct bfhead *b = iterator->next_buf;
243 	bufsize bs = b->bh.bsize;
244 
245 	if (bs == ESent)
246 		return false;
247 
248 	if (bs < 0) {
249 		/* Allocated buffer */
250 		bs = -bs;
251 
252 		*isfree = false;
253 	} else {
254 		/* Free Buffer */
255 		*isfree = true;
256 
257 		/* Assert that the free list links are intact */
258 		assert(b->ql.blink->ql.flink == b);
259 		assert(b->ql.flink->ql.blink == b);
260 	}
261 
262 	*buf = (uint8_t *)b + sizeof(struct bhead);
263 	*len = bs - sizeof(struct bhead);
264 
265 	iterator->next_buf = BFH((uint8_t *)b + bs);
266 	return true;
267 }
268 
269 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
270 {
271 	while (true) {
272 		size_t len;
273 		bool isfree;
274 
275 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
276 			if (isfree)
277 				continue;
278 			return true;
279 		}
280 
281 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
282 			return false;
283 
284 		iterator->pool_idx++;
285 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
286 	}
287 }
288 
/*
 * Convenience macro for looping over all allocated buffers.
 * "iterator" is a struct bpool_iterator * and "bp" a void ** that
 * receives the payload pointer on each iteration.
 */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
293 
294 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
295 {
296 	void *ptr;
297 	size_t s = hdr_size + ftr_size + pl_size;
298 
299 	malloc_lock();
300 
301 	/*
302 	 * Make sure that malloc has correct alignment of returned buffers.
303 	 * The assumption is that uintptr_t will be as wide as the largest
304 	 * required alignment of any type.
305 	 */
306 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
307 
308 	raw_malloc_validate_pools();
309 
310 	/* Check wrapping */
311 	if (s < pl_size)
312 		return NULL;
313 
314 	/* BGET doesn't like 0 sized allocations */
315 	if (!s)
316 		s++;
317 
318 	ptr = bget(s);
319 	raw_malloc_save_max_alloced_size();
320 
321 	malloc_unlock();
322 	return ptr;
323 }
324 
/*
 * Returns a buffer obtained from raw_malloc()/raw_calloc()/raw_realloc()
 * to its pool. NULL is accepted and ignored.
 */
static void raw_free(void *ptr)
{
	malloc_lock();

	raw_malloc_validate_pools();

	if (ptr)
		brel(ptr);

	malloc_unlock();
}
336 
/*
 * Allocates and zero-fills hdr_size + ftr_size + pl_nmemb * pl_size bytes.
 * Returns NULL on arithmetic overflow or pool exhaustion.
 *
 * Fixes: (1) the multiplication pl_nmemb * pl_size could silently wrap
 * before the old "s < pl_nmemb || s < pl_size" check ran; (2) the early
 * return used to happen with the malloc lock held.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	void *ptr = NULL;
	size_t pl_bytes = 0;
	size_t s = 0;

	/* Check multiplication overflow */
	if (pl_nmemb && pl_size > SIZE_MAX / pl_nmemb)
		return NULL;
	pl_bytes = pl_nmemb * pl_size;

	/* Check wrapping of each addition */
	s = hdr_size + ftr_size;
	if (s < hdr_size)
		return NULL;
	s += pl_bytes;
	if (s < pl_bytes)
		return NULL;

	malloc_lock();

	raw_malloc_validate_pools();

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
	raw_malloc_save_max_alloced_size();

	malloc_unlock();

	return ptr;
}
362 
/*
 * Resizes a buffer previously obtained from this allocator to hold
 * hdr_size + ftr_size + pl_size bytes. Returns the (possibly moved)
 * buffer or NULL on wrap-around/exhaustion.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	size_t total = hdr_size + ftr_size + pl_size;
	void *new_ptr = NULL;

	/* Reject sizes that wrapped around */
	if (total < pl_size)
		return NULL;

	malloc_lock();

	raw_malloc_validate_pools();

	/* bget cannot handle zero-sized requests */
	if (!total)
		total = 1;

	new_ptr = bgetr(ptr, total);
	raw_malloc_save_max_alloced_size();

	malloc_unlock();

	return new_ptr;
}
388 
/*
 * Turns the size bytes at bf into a free block and links it on the global
 * free list. bn is the allocated block located immediately after the new
 * free block. Caller must hold the malloc lock.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
411 
/*
 * Trims the front of the allocated buffer starting at orig_buf so that
 * it instead starts at new_buf, returning [orig_buf, new_buf) to the
 * allocator. Used by raw_memalign() to discard the unaligned head of an
 * over-allocated buffer. Caller must hold the malloc lock.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	/* The freed head no longer counts as allocated */
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
461 
/*
 * Shrinks the allocated buffer starting at buf to hold size payload
 * bytes, releasing the surplus at the end back to the allocator.
 * Consolidates with the following block when that one is free. Used by
 * raw_memalign(). Caller must hold the malloc lock.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	/* b->bsize is negative, so this points past the end of the buffer */
	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	/* The released tail no longer counts as allocated */
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
534 
535 static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
536 		size_t size)
537 {
538 	size_t s;
539 	uintptr_t b;
540 
541 	malloc_lock();
542 
543 	raw_malloc_validate_pools();
544 
545 	if (!IS_POWER_OF_TWO(alignment))
546 		return NULL;
547 
548 	/*
549 	 * Normal malloc with headers always returns something SizeQuant
550 	 * aligned.
551 	 */
552 	if (alignment <= SizeQuant)
553 		return raw_malloc(hdr_size, ftr_size, size);
554 
555 	s = hdr_size + ftr_size + alignment + size +
556 	    SizeQ + sizeof(struct bhead);
557 
558 	/* Check wapping */
559 	if (s < alignment || s < size)
560 		return NULL;
561 
562 	b = (uintptr_t)bget(s);
563 	if (!b)
564 		return NULL;
565 
566 	if ((b + hdr_size) & (alignment - 1)) {
567 		/*
568 		 * Returned buffer is not aligned as requested if the
569 		 * hdr_size is added. Find an offset into the buffer
570 		 * that is far enough in to the buffer to be able to free
571 		 * what's in front.
572 		 */
573 		uintptr_t p;
574 
575 		/*
576 		 * Find the point where the buffer including supplied
577 		 * header size should start.
578 		 */
579 		p = b + hdr_size + alignment;
580 		p &= ~(alignment - 1);
581 		p -= hdr_size;
582 		if ((p - b) < (SizeQ + sizeof(struct bhead)))
583 			p += alignment;
584 		assert((p + hdr_size + ftr_size + size) <= (b + s));
585 
586 		/* Free the front part of the buffer */
587 		brel_before((void *)b, (void *)p);
588 
589 		/* Set the new start of the buffer */
590 		b = p;
591 	}
592 
593 	/*
594 	 * Since b is now aligned, release what we don't need at the end of
595 	 * the buffer.
596 	 */
597 	brel_after((void *)b, hdr_size + ftr_size + size);
598 
599 	raw_malloc_save_max_alloced_size();
600 
601 	malloc_unlock();
602 
603 	return (void *)b;
604 }
605 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Returns the usable payload size of the bget buffer buf, i.e. the
 * block size minus the bget header.
 */
static bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	/* Allocated buffers store their size negated */
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
627 
628 #ifdef ENABLE_MDBG
629 
/*
 * Debug bookkeeping header placed in front of every allocation when
 * ENABLE_MDBG is set. A magic footer word follows the payload (see
 * mdbg_get_footer()) to detect overruns.
 */
struct mdbg_hdr {
	const char *fname;	/* allocation site: file name */
	uint16_t line;		/* allocation site: line number */
	bool ignore;		/* allocated in MDBG_MODE_STATIC, skip in dumps */
	uint32_t pl_size;	/* payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while the buffer is live */
#if defined(ARM64)
	uint64_t pad;		/* keep sizeof a multiple of 16 on ARM64 */
#endif
};
640 
/* Guard values stamped into header and footer of live allocations */
#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

/* TODO make this a per thread variable */
static enum mdbg_mode mdbg_mode = MDBG_MODE_DYNAMIC;
646 
/*
 * Size of the footer area for a payload of pl_size bytes: padding up to
 * the next uint32_t boundary plus the guard word itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t rounded = ROUNDUP(pl_size, sizeof(uint32_t));

	return (rounded - pl_size) + sizeof(uint32_t);
}
653 
654 
655 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
656 {
657 	uint32_t *footer;
658 
659 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
660 			      mdbg_get_ftr_size(hdr->pl_size));
661 	footer--;
662 	return footer;
663 }
664 
665 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
666 		int lineno, size_t pl_size)
667 {
668 	uint32_t *footer;
669 
670 	hdr->fname = fname;
671 	hdr->line = lineno;
672 	hdr->pl_size = pl_size;
673 	hdr->magic = MDBG_HEADER_MAGIC;
674 	hdr->ignore = mdbg_mode == MDBG_MODE_STATIC;
675 
676 	footer = mdbg_get_footer(hdr);
677 	*footer = MDBG_FOOTER_MAGIC;
678 }
679 
680 void *mdbg_malloc(const char *fname, int lineno, size_t size)
681 {
682 	struct mdbg_hdr *hdr;
683 
684 	/*
685 	 * Check struct mdbg_hdr doesn't get bad alignment.
686 	 * This is required by C standard: the buffer returned from
687 	 * malloc() should be aligned with a fundamental alignment.
688 	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
689 	 */
690 	COMPILE_TIME_ASSERT(
691 		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
692 
693 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
694 			  mdbg_get_ftr_size(size), size);
695 	if (hdr) {
696 		mdbg_update_hdr(hdr, fname, lineno, size);
697 		hdr++;
698 	}
699 	return hdr;
700 }
701 
/* Asserts that neither guard word of hdr has been clobbered */
static void assert_header(struct mdbg_hdr *hdr)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
707 
708 static void mdbg_free(void *ptr)
709 {
710 	struct mdbg_hdr *hdr = ptr;
711 
712 	if (hdr) {
713 		hdr--;
714 		assert_header(hdr);
715 		hdr->magic = 0;
716 		*mdbg_get_footer(hdr) = 0;
717 		raw_free(hdr);
718 	}
719 }
720 
/* Standard free(): forwards to the debug wrapper which verifies guards */
void free(void *ptr)
{
	mdbg_free(ptr);
}
725 
726 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
727 {
728 	struct mdbg_hdr *hdr;
729 
730 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
731 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
732 	if (hdr) {
733 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
734 		hdr++;
735 	}
736 	return hdr;
737 }
738 
739 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
740 {
741 	struct mdbg_hdr *hdr = ptr;
742 
743 	if (hdr) {
744 		hdr--;
745 		assert_header(hdr);
746 	}
747 	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
748 			   mdbg_get_ftr_size(size), size);
749 	if (hdr) {
750 		mdbg_update_hdr(hdr, fname, lineno, size);
751 		hdr++;
752 	}
753 	return hdr;
754 }
755 
756 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
757 		size_t size)
758 {
759 	struct mdbg_hdr *hdr;
760 
761 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
762 			   alignment, size);
763 	if (hdr) {
764 		mdbg_update_hdr(hdr, fname, lineno, size);
765 		hdr++;
766 	}
767 	return hdr;
768 }
769 
770 
/*
 * Given a raw bget buffer that holds an mdbg header, returns the payload
 * start and stores the payload size in *size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The recorded payload can never exceed the underlying bget buffer */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
779 
780 void mdbg_check(int bufdump)
781 {
782 	struct bpool_iterator itr;
783 	void *b;
784 
785 	raw_malloc_validate_pools();
786 
787 	BPOOL_FOREACH(&itr, &b) {
788 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
789 
790 		assert_header(hdr);
791 
792 		if (bufdump > 0 || !hdr->ignore) {
793 			const char *fname = hdr->fname;
794 
795 			if (!fname)
796 				fname = "unknown";
797 
798 			DMSG("%s buffer: %d bytes %s:%d\n",
799 				hdr->ignore ? "Ignore" : "Orphaned",
800 				hdr->pl_size, fname, hdr->line);
801 		}
802 	}
803 
804 }
805 
806 enum mdbg_mode mdbg_set_mode(enum mdbg_mode mode)
807 {
808 	enum mdbg_mode old_mode = mdbg_mode;
809 
810 	mdbg_mode = mode;
811 	return old_mode;
812 }
813 
#else

/* Memory debugging disabled: thin wrappers around the raw allocator */

void *malloc(size_t size)
{
	return raw_malloc(0, 0, size);
}

void free(void *ptr)
{
	raw_free(ptr);
}

void *calloc(size_t nmemb, size_t size)
{
	return raw_calloc(0, 0, nmemb, size);
}

void *realloc(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}

void *memalign(size_t alignment, size_t size)
{
	return raw_memalign(0, 0, alignment, size);
}

/* Without a debug header the payload is the bget buffer itself */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif
848 
849 
850 
/*
 * Registers the initial heap pool. Must be called exactly once before
 * any allocation; further pools can be added with malloc_add_pool().
 */
void malloc_init(void *buf, size_t len)
{
	/* Must not be called twice */
	assert(!malloc_pool);

	malloc_add_pool(buf, len);
}
858 
/*
 * Hands the memory range [buf, buf + len) to bget via bpool() and
 * records it in the malloc_pool bookkeeping array.
 *
 * NOTE(review): no lock is taken here, presumably because pools are only
 * added during single-threaded initialization — confirm. The realloc()
 * below takes the malloc lock itself, so this function must not be
 * wrapped in malloc_lock()/malloc_unlock() as-is.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Don't report the bookkeeping array itself as a leak in mdbg_check() */
	enum mdbg_mode old_mode = mdbg_set_mode(MDBG_MODE_STATIC);

	/* bget requires SizeQuant-aligned pool boundaries */
	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	bpool((void *)start, end - start);

	l = malloc_pool_len + 1;
	p = realloc(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	malloc_pool_len = l;
	mdbg_set_mode(old_mode);
}
882 
883 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
884 {
885 	struct bpool_iterator itr;
886 	void *b;
887 	uint8_t *start_buf = buf;
888 	uint8_t *end_buf = start_buf + len;
889 	bool ret = false;
890 
891 	malloc_lock();
892 
893 	raw_malloc_validate_pools();
894 
895 	/* Check for wrapping */
896 	if (start_buf > end_buf)
897 		goto out;
898 
899 	BPOOL_FOREACH(&itr, &b) {
900 		uint8_t *start_b;
901 		uint8_t *end_b;
902 		size_t s;
903 
904 		start_b = get_payload_start_size(b, &s);
905 		end_b = start_b + s;
906 
907 		if (start_buf >= start_b && end_buf <= end_b) {
908 			ret = true;
909 			goto out;
910 		}
911 	}
912 
913 out:
914 	malloc_unlock();
915 
916 	return ret;
917 }
918 
919 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
920 {
921 	uintptr_t buf_start = (uintptr_t) buf;
922 	uintptr_t buf_end = buf_start + len;
923 	size_t n;
924 	bool ret = false;
925 
926 	malloc_lock();
927 
928 	raw_malloc_validate_pools();
929 
930 	for (n = 0; n < malloc_pool_len; n++) {
931 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
932 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
933 
934 		if (buf_start > buf_end || pool_start > pool_end) {
935 			ret = true;	/* Wrapping buffers, shouldn't happen */
936 			goto out;
937 		}
938 
939 		if (buf_end > pool_start || buf_start < pool_end) {
940 			ret = true;
941 			goto out;
942 		}
943 	}
944 
945 out:
946 	malloc_unlock();
947 	return ret;
948 }
949