xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision a50cb361d9e5735f197ccc87beb0d24af8315369)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
#define PROTOTYPES

/*
 *  BGET CONFIGURATION
 *  ==================
 *  These macros are consumed by the #include "bget.c" below; they must
 *  be defined before that inclusion.
 */
/* #define BGET_ENABLE_ALL_OPTIONS */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


/*
 * SizeQuant is chosen per ABI: 16 bytes on LP64 targets, 8 bytes on
 * ILP32 targets, matching the largest fundamental alignment there.
 */
#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control.  */
#endif

/* Memory-debug builds force pool validation and free-buffer wiping */
#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

/* Heap statistics are required when OP-TEE statistics are enabled */
#ifdef CFG_WITH_STATS
#define BufStats    1
#endif
101 
102 #include <compiler.h>
103 #include <stdlib.h>
104 #include <stdint.h>
105 #include <stdbool.h>
106 #include <malloc.h>
107 #include "bget.c"		/* this is ugly, but this is bget */
108 #include <util.h>
109 #include <trace.h>
110 
#ifdef __KERNEL__
/* Compiling for TEE Core */
#include <kernel/mutex.h>

/*
 * Serializes every heap operation in this file; all public entry points
 * take this lock around the raw_*() helpers.
 */
static struct mutex malloc_mu = MUTEX_INITIALIZER;

static void malloc_lock(void)
{
	mutex_lock(&malloc_mu);
}

static void malloc_unlock(void)
{
	mutex_unlock(&malloc_mu);
}

#else /*__KERNEL__*/
/* Compiling for TA: no locking needed, the lock hooks are no-ops */
static void malloc_lock(void)
{
}

static void malloc_unlock(void)
{
}
#endif /*__KERNEL__*/
137 
138 #if defined(ENABLE_MDBG)
139 #include <trace.h>
140 #endif
141 
/* One contiguous memory region handed to bget via bpool() */
struct malloc_pool {
	void *buf;	/* start of the pool (SizeQuant aligned) */
	size_t len;	/* pool length in bytes */
};

/* Dynamically grown array of all registered pools, see malloc_add_pool() */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;
149 
#ifdef BufStats
/* High-water mark of bget's totalloc counter (bytes currently allocated) */
static size_t max_alloc_heap;

/* Record a new high-water mark; called after every allocation attempt */
static void raw_malloc_save_max_alloced_size(void)
{
	if (totalloc > max_alloc_heap)
		max_alloc_heap = totalloc;
}

/* Reset the recorded high-water mark (thread-safe) */
void malloc_reset_max_allocated(void)
{
	malloc_lock();
	max_alloc_heap = 0;
	malloc_unlock();
}

/* Return the largest number of bytes ever simultaneously allocated */
size_t malloc_get_max_allocated(void)
{
	size_t r;

	malloc_lock();
	r = max_alloc_heap;
	malloc_unlock();
	return r;
}

/* Return the number of bytes currently allocated (bget's totalloc) */
size_t malloc_get_allocated(void)
{
	size_t r;

	malloc_lock();
	r = totalloc;
	malloc_unlock();
	return r;
}

#else /* BufStats */

/* Statistics disabled: stubs keeping the same external interface */
static void raw_malloc_save_max_alloced_size(void)
{
}

void malloc_reset_max_allocated(void)
{
}

size_t malloc_get_max_allocated(void)
{
	return 0;
}

size_t malloc_get_allocated(void)
{
	return 0;
}
#endif /* BufStats */
206 
207 size_t malloc_get_heap_size(void)
208 {
209 	size_t n;
210 	size_t s = 0;
211 
212 	malloc_lock();
213 
214 	for (n = 0; n < malloc_pool_len; n++)
215 		s += malloc_pool[n].len;
216 
217 	malloc_unlock();
218 
219 	return s;
220 }
221 
#ifdef BufValid
/*
 * Run bget's pool validation (bpoolv()) over every registered pool.
 * bpoolv() asserts internally on corruption, so this either passes or
 * panics a debug build.
 */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
/* Validation disabled: keep the call sites unconditional */
static void raw_malloc_validate_pools(void)
{
}
#endif
235 
/* Cursor for walking every bget buffer across all registered pools */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};

/* Start iteration at the first buffer of the first pool */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
246 
/*
 * Step to the next buffer inside the current pool.
 *
 * On success returns true and fills in the buffer payload pointer, its
 * payload length and whether the buffer is free. Returns false when the
 * pool's end sentinel (ESent) is reached; the caller then advances to the
 * next pool.
 *
 * bget convention: bh.bsize < 0 marks an allocated buffer (magnitude is
 * the size), > 0 a free buffer, ESent the end-of-pool sentinel.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts right after the plain bhead header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
276 
/*
 * Advance the iterator to the next *allocated* buffer across all pools.
 * Free buffers are skipped. Returns false when every pool is exhausted.
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		/* Current pool exhausted, move on to the next one if any */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
301 
302 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
303 {
304 	void *ptr;
305 	size_t s = hdr_size + ftr_size + pl_size;
306 
307 	/*
308 	 * Make sure that malloc has correct alignment of returned buffers.
309 	 * The assumption is that uintptr_t will be as wide as the largest
310 	 * required alignment of any type.
311 	 */
312 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
313 
314 	raw_malloc_validate_pools();
315 
316 	/* Check wrapping */
317 	if (s < pl_size)
318 		return NULL;
319 
320 	/* BGET doesn't like 0 sized allocations */
321 	if (!s)
322 		s++;
323 
324 	ptr = bget(s);
325 	raw_malloc_save_max_alloced_size();
326 
327 	return ptr;
328 }
329 
/* Return a buffer to bget; free(NULL) is a no-op as usual */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
337 
/*
 * Allocate and zero hdr_size + ftr_size + pl_nmemb * pl_size bytes.
 * Returns NULL on any arithmetic overflow or when bget is out of memory.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t s;
	void *ptr;

	raw_malloc_validate_pools();

	/*
	 * Check wrapping. The previous test (s < pl_nmemb || s < pl_size)
	 * did not catch all multiplication overflows: e.g. two factors of
	 * about sqrt(SIZE_MAX) wrap to a small product that is still larger
	 * than either factor. Check the multiplication and each addition
	 * explicitly instead.
	 */
	if (pl_size && pl_nmemb > SIZE_MAX / pl_size)
		return NULL;
	if (hdr_size > SIZE_MAX - ftr_size)
		return NULL;
	s = hdr_size + ftr_size;
	if (s > SIZE_MAX - pl_nmemb * pl_size)
		return NULL;
	s += pl_nmemb * pl_size;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
	raw_malloc_save_max_alloced_size();

	return ptr;
}
359 
/*
 * Resize a buffer via bgetr(). ptr == NULL behaves like an allocation.
 * Returns NULL on size overflow or when bget cannot satisfy the request
 * (in which case the original buffer is left intact, per bgetr()).
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	size_t s;
	void *p;

	/*
	 * Check wrapping of both additions; the previous single test
	 * (s < pl_size) missed a wrap of hdr_size + ftr_size alone.
	 */
	if (hdr_size > SIZE_MAX - ftr_size)
		return NULL;
	s = hdr_size + ftr_size;
	if (s > SIZE_MAX - pl_size)
		return NULL;
	s += pl_size;

	raw_malloc_validate_pools();

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s);
	raw_malloc_save_max_alloced_size();

	return p;
}
381 
/*
 * Turn the `size` bytes starting at bf into a free bget block and link it
 * onto the free list. bn is the (allocated) block immediately following;
 * its prevfree field is updated to point back at the new free block.
 * Helper for brel_before()/brel_after() below.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
404 
/*
 * Release the front part [orig_buf, new_buf) of an allocated bget buffer,
 * making new_buf the start of the (still allocated) buffer. Used by
 * raw_memalign() to discard the unaligned head of an over-allocation.
 * The freed space is either merged with a preceding free block or turned
 * into a new free block.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	/* The freed head no longer counts as allocated */
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
454 
/*
 * Shrink the allocated bget buffer starting at buf down to `size` payload
 * bytes, releasing the tail. Used by raw_memalign() to return the unused
 * end of an over-allocation. The tail is merged with a following free
 * block when possible, otherwise a new free block is created. If the tail
 * is too small to hold a free-block header, nothing is released.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	/* The released tail no longer counts as allocated */
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
527 
/*
 * Allocate `size` payload bytes such that (result + hdr_size) is aligned
 * to `alignment` (a power of two). Over-allocates, then trims the front
 * (brel_before) and tail (brel_after) of the bget buffer as needed.
 * Returns NULL on bad alignment, size overflow or out of memory.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	/* Worst case: alignment slack plus room to free the unused front */
	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		return NULL;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);

	raw_malloc_save_max_alloced_size();

	return (void *)b;
}
594 
/*
 * Return the usable payload size of an allocated bget buffer.
 * Most of the stuff in this function is copied from bgetr() in bget.c.
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
616 
617 #ifdef ENABLE_MDBG
618 
/*
 * Bookkeeping header placed in front of every allocation when memory
 * debugging is enabled; a uint32_t footer magic follows the payload.
 * The struct size must keep malloc's fundamental alignment (see the
 * COMPILE_TIME_ASSERT in mdbg_malloc()).
 */
struct mdbg_hdr {
	const char *fname;	/* allocating file name (__FILE__) */
	uint16_t line;		/* allocating line number */
	uint32_t pl_size;	/* requested payload size in bytes */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	uint64_t pad;		/* keep 16-byte alignment on ARM64 */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
631 
/*
 * Footer size for a payload of pl_size bytes: padding up to the next
 * uint32_t boundary plus one uint32_t holding the footer magic.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t rounded = ROUNDUP(pl_size, sizeof(uint32_t));

	return (rounded - pl_size) + sizeof(uint32_t);
}
638 
/*
 * Return the address of the uint32_t footer magic: the last word of
 * payload + footer area, i.e. right after the padded payload.
 */
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}
648 
/* Fill in the debug header and write the footer magic after the payload */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
662 
/*
 * Debug malloc(): allocates room for a struct mdbg_hdr before and a magic
 * footer after the payload, records the caller's file/line, and returns
 * a pointer to the payload (just past the header).
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}

	malloc_unlock();
	return hdr;
}
688 
/* Assert both guard magics are intact (catches overflow/underflow/UAF) */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
694 
/*
 * Debug free: verify the guard magics, then clear them so a double free
 * or use-after-free trips the asserts, and release the real buffer
 * (which starts at the header, one struct before the payload pointer).
 */
static void mdbg_free(void *ptr)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;	/* step back from payload to header */
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr);
	}
}
707 
/* Public free(): locked wrapper around mdbg_free() */
void free(void *ptr)
{
	malloc_lock();
	mdbg_free(ptr);
	malloc_unlock();
}
714 
715 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
716 {
717 	struct mdbg_hdr *hdr;
718 
719 	malloc_lock();
720 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
721 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
722 	if (hdr) {
723 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
724 		hdr++;
725 	}
726 	malloc_unlock();
727 	return hdr;
728 }
729 
/*
 * Debug realloc without locking (caller holds the malloc lock).
 * Validates the old buffer's guard magics, resizes it header+footer
 * included, and refreshes the bookkeeping on success.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;	/* step back from payload to header */
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}
	return hdr;
}
747 
/* Public debug realloc(): locked wrapper around the unlocked variant */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *p;

	malloc_lock();
	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock();
	return p;
}

/* Internal callers (e.g. malloc_add_pool()) use this lock-free alias */
#define realloc_unlocked(ptr, size) \
		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
760 
/*
 * Debug memalign(): aligned allocation with the mdbg header/footer
 * guards; the returned payload pointer honors the requested alignment
 * because raw_memalign() aligns (buffer + hdr_size).
 */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();
	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}
	malloc_unlock();
	return hdr;
}
776 
777 
/*
 * Given a raw bget buffer (which starts with the mdbg header), return
 * the payload pointer and its recorded size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The recorded payload must fit in the underlying bget buffer */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
786 
787 void mdbg_check(int bufdump)
788 {
789 	struct bpool_iterator itr;
790 	void *b;
791 
792 	malloc_lock();
793 	raw_malloc_validate_pools();
794 
795 	BPOOL_FOREACH(&itr, &b) {
796 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
797 
798 		assert_header(hdr);
799 
800 		if (bufdump > 0) {
801 			const char *fname = hdr->fname;
802 
803 			if (!fname)
804 				fname = "unknown";
805 
806 			IMSG("buffer: %d bytes %s:%d\n",
807 				hdr->pl_size, fname, hdr->line);
808 		}
809 	}
810 
811 	malloc_unlock();
812 }
813 
814 #else
815 
/* Standard malloc(): locked wrapper around raw_malloc() without guards */
void *malloc(size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_malloc(0, 0, size);
	malloc_unlock();

	return ret;
}
825 
/* Standard free(): locked wrapper around raw_free() */
void free(void *ptr)
{
	malloc_lock();
	raw_free(ptr);
	malloc_unlock();
}
832 
/* Standard calloc(): locked, zeroing allocation via raw_calloc() */
void *calloc(size_t nmemb, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_calloc(0, 0, nmemb, size);
	malloc_unlock();

	return ret;
}
842 
/* Lock-free realloc used internally (e.g. by malloc_add_pool()) */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}

/* Standard realloc(): locked wrapper */
void *realloc(void *ptr, size_t size)
{
	void *p;

	malloc_lock();
	p = realloc_unlocked(ptr, size);
	malloc_unlock();
	return p;
}
857 
/* Standard memalign(): locked wrapper around raw_memalign() */
void *memalign(size_t alignment, size_t size)
{
	void *p;

	malloc_lock();
	p = raw_memalign(0, 0, alignment, size);
	malloc_unlock();
	return p;
}
867 
/* Without mdbg headers the payload is the whole bget buffer */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
873 
874 #endif
875 
/*
 * Register [buf, buf + len) as a heap pool. The range is shrunk to
 * SizeQuant alignment, handed to bget via bpool(), and recorded in
 * malloc_pool[] (which itself grows from the heap, so at least one pool
 * must be usable by the time a second one is added). Pools too small to
 * be useful are skipped with a log message.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest pool worth tracking: room for the bookkeeping entry
	 * (rounded up to SizeQuant) plus two bget block headers. */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	malloc_lock();
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	/* Lock is already held, so use the unlocked realloc */
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	malloc_pool_len = l;
	malloc_unlock();
}
907 
/*
 * Return true if [buf, buf + len) lies entirely inside a single
 * currently-allocated heap buffer's payload. Used to validate pointers
 * received from less trusted callers.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;

	malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	/* Scan every allocated buffer for one that contains the range */
	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock();

	return ret;
}
943 
944 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
945 {
946 	uintptr_t buf_start = (uintptr_t) buf;
947 	uintptr_t buf_end = buf_start + len;
948 	size_t n;
949 	bool ret = false;
950 
951 	malloc_lock();
952 
953 	raw_malloc_validate_pools();
954 
955 	for (n = 0; n < malloc_pool_len; n++) {
956 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
957 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
958 
959 		if (buf_start > buf_end || pool_start > pool_end) {
960 			ret = true;	/* Wrapping buffers, shouldn't happen */
961 			goto out;
962 		}
963 
964 		if (buf_end > pool_start || buf_start < pool_end) {
965 			ret = true;
966 			goto out;
967 		}
968 	}
969 
970 out:
971 	malloc_unlock();
972 	return ret;
973 }
974